author		David S. Miller <davem@davemloft.net>	2005-08-09 22:25:21 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:31:14 -0400
commit		8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree		2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net
parent		6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely redundant. All SKB list removal callers know which list the SKB is on, so storing this in sk_buff does nothing other than taking up some space. Two tricky bits were SCTP, which I took care of, and two ATM drivers which Francois Romieu <romieu@fr.zoreil.com> fixed up. Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Diffstat (limited to 'net')
-rw-r--r--	net/atm/ipcommon.c	|   3
-rw-r--r--	net/ax25/ax25_subr.c	|   2
-rw-r--r--	net/core/skbuff.c	|  57
-rw-r--r--	net/decnet/af_decnet.c	|   2
-rw-r--r--	net/decnet/dn_nsp_out.c	|   2
-rw-r--r--	net/econet/af_econet.c	|   4
-rw-r--r--	net/ipv4/tcp.c	|   2
-rw-r--r--	net/ipv4/tcp_input.c	|  29
-rw-r--r--	net/ipv4/tcp_output.c	|   6
-rw-r--r--	net/irda/irlap_frame.c	|   6
-rw-r--r--	net/lapb/lapb_subr.c	|   2
-rw-r--r--	net/llc/af_llc.c	|   2
-rw-r--r--	net/llc/llc_conn.c	|   6
-rw-r--r--	net/netrom/nr_subr.c	|   2
-rw-r--r--	net/rose/rose_subr.c	|   2
-rw-r--r--	net/sctp/socket.c	|   4
-rw-r--r--	net/sctp/ulpqueue.c	|  63
-rw-r--r--	net/unix/garbage.c	|  12
-rw-r--r--	net/x25/x25_subr.c	|   2
19 files changed, 105 insertions, 103 deletions
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 181a3002d8ad..4b1faca5013f 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -34,7 +34,6 @@
 
 void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 {
-	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	prev->next = skb_to;
 	to->prev->next = from->next;
 	to->prev = from->prev;
-	for (skb = from->next; skb != skb_to; skb = skb->next)
-		skb->list = to;
 	to->qlen += from->qlen;
 	spin_unlock(&to->lock);
 	from->prev = skb_from;
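Note the effect here: with skb->list gone, skb_migrate() no longer walks every
migrated SKB to retarget its list pointer. The move reduces to the head/tail
pointer surgery already visible in the context above, roughly:

	/* Sketch of the remaining splice; locks and empty-list handling elided. */
	to->prev->next = from->next;	/* old tail of 'to' -> first skb of 'from' */
	to->prev = from->prev;		/* tail of 'from' becomes tail of 'to' */
	to->qlen += from->qlen;		/* account for the moved SKBs */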
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 99694b57f6f5..eb7343c10a9f 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
 		if (skb_prev == NULL)
 			skb_queue_head(&ax25->write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &ax25->write_queue);
 		skb_prev = skb;
 	}
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d9..e6564b0a6839 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
 	C(stamp);
 	C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list = NULL;
 	new->sk = NULL;
 	new->dev = old->dev;
 	new->real_dev = old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  *	skb_unlink	-	remove a buffer from a list
  *	@skb: buffer to remove
+ *	@list: list to use
  *
- *	Place a packet after a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
+ *	Remove a packet from a list. The list locks are taken and this
+ *	function is atomic with respect to other list locked calls
  *
- *	Works even without knowing the list it is sitting on, which can be
- *	handy at times. It also means that THE LIST MUST EXIST when you
- *	unlink. Thus a list must have its contents unlinked before it is
- *	destroyed.
+ *	You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;
 
-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  *	skb_append	-	append a buffer
  *	@old: buffer to insert after
  *	@newsk: buffer to insert
+ *	@list: list to use
  *
  *	Place a packet after a given packet in a list. The list locks are taken
  *	and this function is atomic with respect to other list locked calls.
  *	A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  *	skb_insert	-	insert a buffer
  *	@old: buffer to insert before
  *	@newsk: buffer to insert
+ *	@list: list to use
+ *
+ *	Place a packet before a given packet in a list. The list locks are
+ *	taken and this function is atomic with respect to other list locked
+ *	calls.
  *
- *	Place a packet before a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
  *	A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
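The rest of the patch is the mechanical caller-side counterpart: every caller
already knows which queue its SKB sits on and now says so. The pattern, taking
the net/ipv4/tcp.c hunk below as the example:

	/* Before: the queue was read back out of the SKB. */
	__skb_unlink(skb, skb->list);

	/* After: the caller passes the queue explicitly. */
	__skb_unlink(skb, &sk->sk_write_queue);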
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acdd18e6adb2..0c30409fe9e5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		nskb = skb->next;
 
 		if (skb->len == 0) {
-			skb_unlink(skb);
+			skb_unlink(skb, queue);
 			kfree_skb(skb);
 			/*
 			 * N.B. Don't refer to skb or cb after this point
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8cce1fdbda90..e0bebf4bbcad 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 			xmit_count = cb2->xmit_count;
 			segnum = cb2->segnum;
 			/* Remove and drop ack'ed packet */
-			skb_unlink(ack);
+			skb_unlink(ack, q);
 			kfree_skb(ack);
 			ack = NULL;
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index de691e119e17..b807a314269e 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
 
 foundit:
 	tx_result(skb->sk, eb->cookie, result);
-	skb_unlink(skb);
+	skb_unlink(skb, &aun_queue);
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 	kfree_skb(skb);
 }
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
 		{
 			tx_result(skb->sk, eb->cookie,
 				  ECTYPE_TRANSMIT_NOT_PRESENT);
-			skb_unlink(skb);
+			skb_unlink(skb, &aun_queue);
 			kfree_skb(skb);
 		}
 		skb = newskb;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 69b1fcf70077..d2696af46c70 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -975,7 +975,7 @@ do_fault:
 		if (!skb->len) {
 			if (sk->sk_send_head == skb)
 				sk->sk_send_head = NULL;
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &sk->sk_write_queue);
 			sk_stream_free_skb(sk, skb);
 		}
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..ffa24025cd02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 			seq_rtt = now - scb->when;
 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
 		tcp_packets_out_dec(tp, skb);
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &sk->sk_write_queue);
 		sk_stream_free_skb(sk, skb);
 	}
 
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 			SOCK_DEBUG(sk, "ofo packet was already received \n");
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &tp->out_of_order_queue);
 			__kfree_skb(skb);
 			continue;
 		}
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
 			   TCP_SKB_CB(skb)->end_seq);
 
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
 		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 		if (seq == TCP_SKB_CB(skb1)->end_seq) {
-			__skb_append(skb1, skb);
+			__skb_append(skb1, skb, &tp->out_of_order_queue);
 
 			if (!tp->rx_opt.num_sacks ||
 			    tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
 				tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
 				break;
 			}
-			__skb_unlink(skb1, skb1->list);
+			__skb_unlink(skb1, &tp->out_of_order_queue);
 			tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
 			__kfree_skb(skb1);
 		}
@@ -3088,8 +3088,9 @@ add_sack:
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
-	     struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+	     struct sk_buff *head, struct sk_buff *tail,
+	     u32 start, u32 end)
 {
 	struct sk_buff *skb;
 
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 	/* No new bits? It is possible on ofo queue. */
 	if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 		struct sk_buff *next = skb->next;
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, list);
 		__kfree_skb(skb);
 		NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 		skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 		nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-		__skb_insert(nskb, skb->prev, skb, skb->list);
+		__skb_insert(nskb, skb->prev, skb, list);
 		sk_stream_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 			}
 			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 				struct sk_buff *next = skb->next;
-				__skb_unlink(skb, skb->list);
+				__skb_unlink(skb, list);
 				__kfree_skb(skb);
 				NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 				skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, head, skb, start, end);
+			tcp_collapse(sk, &tp->out_of_order_queue,
+				     head, skb, start, end);
 			head = skb;
 			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
 				break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
-	tcp_collapse(sk, sk->sk_receive_queue.next,
+	tcp_collapse(sk, &sk->sk_receive_queue,
+		     sk->sk_receive_queue.next,
 		     (struct sk_buff*)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);
 	sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 		tp->copied_seq++;
 		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &sk->sk_receive_queue);
 			__kfree_skb(skb);
 		}
 	}
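tcp_collapse() is the one helper in this file that operates on two different
queues, which is why it gains an explicit queue parameter instead of a
hard-coded one. The two call sites changed above now spell out which queue
they collapse:

	tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end);

	tcp_collapse(sk, &sk->sk_receive_queue,
		     sk->sk_receive_queue.next,
		     (struct sk_buff*)&sk->sk_receive_queue,
		     tp->copied_seq, tp->rcv_nxt);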
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74..a4d1eb9a0926 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -505,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);
 
 	return 0;
 }
@@ -893,7 +893,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
-	__skb_append(skb, buff);
+	__skb_append(skb, buff, &sk->sk_write_queue);
 
 	return 0;
 }
@@ -1238,7 +1238,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 		       tcp_skb_pcount(next_skb) != 1);
 
 	/* Ok.	We will be able to collapse the packet. */
-	__skb_unlink(next_skb, next_skb->list);
+	__skb_unlink(next_skb, &sk->sk_write_queue);
 
 	memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 6dafbb43b529..eb65b4925b51 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 			IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
 			return;
 		}
-		/* Unlink tx_skb from list */
-		tx_skb->next = tx_skb->prev = NULL;
-		tx_skb->list = NULL;
 
 		/* Clear old Nr field + poll bit */
 		tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
 			IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
 			return;
 		}
-		/* Unlink tx_skb from list */
-		tx_skb->next = tx_skb->prev = NULL;
-		tx_skb->list = NULL;
 
 		/* Clear old Nr field + poll bit */
 		tx_skb->data[1] &= 0x0f;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 5de05a0bc0ff..8b5eefd70f03 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
 		if (!skb_prev)
 			skb_queue_head(&lapb->write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &lapb->write_queue);
 		skb_prev = skb;
 	}
 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 20b4cfebd74c..f49b82da8264 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (uaddr)
 		memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
 	msg->msg_namelen = sizeof(*uaddr);
-	if (!skb->list) {
+	if (!skb->next) {
 dgram_free:
 		kfree_skb(skb);
 	}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index eba812a9c69c..571548619469 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
 
 	if (!ev->ind_prim && !ev->cfm_prim) {
 		/* indicate or confirm not required */
-		if (!skb->list)
+		/* XXX this is not very pretty, perhaps we should store
+		 * XXX indicate/confirm-needed state in the llc_conn_state_ev
+		 * XXX control block of the SKB instead? -DaveM
+		 */
+		if (!skb->next)
 			goto out_kfree_skb;
 		goto out_skb_put;
 	}
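Both LLC sites replace the old "if (!skb->list)" test with "if (!skb->next)":
with the list pointer gone, "not currently on a queue" is inferred from the
NULL link of an unqueued SKB, which is exactly what the new XXX comment is
apologizing for. A hypothetical standalone illustration of the invariant being
relied on (not from the patch):

	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);

	if (skb) {
		/* Freshly allocated, hence not on any queue: */
		BUG_ON(skb->next != NULL);
		kfree_skb(skb);
	}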
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 0627347b14b8..252c1b3ecd78 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
 		if (skb_prev == NULL)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7db7e1cedc3a..ae135e27799b 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
 		if (skb_prev == NULL)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 091a66f06a35..4454afe4727e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
 		event = sctp_skb2event(skb);
 		if (event->asoc == assoc) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &oldsk->sk_receive_queue);
 			__skb_queue_tail(&newsk->sk_receive_queue, skb);
 		}
 	}
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
 			event = sctp_skb2event(skb);
 			if (event->asoc == assoc) {
-				__skb_unlink(skb, skb->list);
+				__skb_unlink(skb, &oldsp->pd_lobby);
 				__skb_queue_tail(queue, skb);
 			}
 		}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99..ec2c857eae7f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -50,9 +50,9 @@
 
 /* Forward declarations for internal helpers.  */
 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						struct sctp_ulpevent *);
+					      struct sctp_ulpevent *);
 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
-						struct sctp_ulpevent *);
+					      struct sctp_ulpevent *);
 
 /* 1st Level Abstractions */
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		event = sctp_ulpq_order(ulpq, event);
 	}
 
-	/* Send event to the ULP.  */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk);
 }
 
-
-
+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
-	struct sk_buff_head *queue;
+	struct sk_buff_head *queue, *skb_list;
+	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
 
+	skb_list = (struct sk_buff_head *) skb->prev;
+
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
 	 */
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
 	 */
-	if (sctp_event2skb(event)->list)
-		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+	if (skb_list)
+		sctp_skb_list_tail(skb_list, queue);
 	else
-		__skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, skb);
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	return 1;
 
 out_free:
-	if (sctp_event2skb(event)->list)
-		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
 	else
 		sctp_ulpevent_free(event);
+
 	return 0;
 }
 
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		skb_shinfo(f_frag)->frag_list = pos;
 
 	/* Remove the first fragment from the reassembly queue.  */
-	__skb_unlink(f_frag, f_frag->list);
+	__skb_unlink(f_frag, queue);
 	while (pos) {
 
 		pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		f_frag->data_len += pos->len;
 
 		/* Remove the fragment from the reassembly queue.  */
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, queue);
 
 		/* Break if we have reached the last fragment.  */
 		if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(first_frag, pos);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
 
@@ -537,6 +544,7 @@ done:
 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *event)
 {
+	struct sk_buff_head *event_list;
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
 	struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 	ssn = event->ssn;
 	in  = &ulpq->asoc->ssnmap->in;
 
+	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
 	/* We are holding the chunks by stream, by SSN.  */
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, sid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 
 		/* Attach all gathered skbs to the event.  */
-		__skb_queue_tail(sctp_event2skb(event)->list, pos);
+		__skb_queue_tail(event_list, pos);
 	}
 }
 
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 }
 
 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
-						struct sctp_ulpevent *event)
+					     struct sctp_ulpevent *event)
 {
 	__u16 sid, ssn;
 	struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
-	struct sctp_ulpevent *event = NULL;
+	struct sctp_ulpevent *event;
 	struct sctp_stream *in;
 	struct sk_buff_head temp;
 	__u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 	in  = &ulpq->asoc->ssnmap->in;
 
 	/* We are holding the chunks by stream, by SSN.  */
+	skb_queue_head_init(&temp);
+	event = NULL;
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, csid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 		if (!event) {
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
 		} else {
 			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(sctp_event2skb(event)->list, pos);
+			__skb_queue_tail(&temp, pos);
 		}
 	}
 
-	/* Send event to the ULP.  */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 }
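The SCTP changes above are the "tricky bit" called out in the changelog.
Where the old code recovered the temporary harvest list from skb->list, the
new code exploits the circular layout of struct sk_buff_head: the first SKB
queued onto a list has its prev pointer aimed back at the list head, so a
cast recovers it. A minimal standalone sketch of the invariant being relied
on (hypothetical example, not from the patch):

	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, skb);	/* skb: some event's SKB */
	/* The first SKB on a queue has ->prev aimed back at the head: */
	BUG_ON((struct sk_buff_head *) skb->prev != &temp);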
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 4bd95c8f5934..46252d2807bb 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -286,16 +286,16 @@ void unix_gc(void)
 		skb = skb_peek(&s->sk_receive_queue);
 		while (skb &&
 		       skb != (struct sk_buff *)&s->sk_receive_queue) {
-			nextsk=skb->next;
+			nextsk = skb->next;
 			/*
 			 *	Do we have file descriptors ?
 			 */
-			if(UNIXCB(skb).fp)
-			{
-				__skb_unlink(skb, skb->list);
-				__skb_queue_tail(&hitlist,skb);
+			if (UNIXCB(skb).fp) {
+				__skb_unlink(skb,
+					     &s->sk_receive_queue);
+				__skb_queue_tail(&hitlist, skb);
 			}
-			skb=nextsk;
+			skb = nextsk;
 		}
 		spin_unlock(&s->sk_receive_queue.lock);
 	}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 7fd872ad0c20..e20cfadad4d9 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
 		if (!skb_prev)
 			skb_queue_head(&sk->sk_write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &sk->sk_write_queue);
 		skb_prev = skb;
 	}
 }