 drivers/atm/nicstar.c            | 157
 drivers/atm/nicstar.h            |  16
 drivers/atm/zatm.c               |   8
 drivers/bluetooth/bfusb.c        |   8
 drivers/ieee1394/ieee1394_core.c |   4
 drivers/isdn/act2000/capi.c      |   2
 drivers/net/shaper.c             |  50
 drivers/net/wan/sdla_fr.c        |  22
 drivers/usb/net/usbnet.c         |  21
 include/linux/skbuff.h           |  16
 net/atm/ipcommon.c               |   3
 net/ax25/ax25_subr.c             |   2
 net/core/skbuff.c                |  57
 net/decnet/af_decnet.c           |   2
 net/decnet/dn_nsp_out.c          |   2
 net/econet/af_econet.c           |   4
 net/ipv4/tcp.c                   |   2
 net/ipv4/tcp_input.c             |  29
 net/ipv4/tcp_output.c            |   6
 net/irda/irlap_frame.c           |   6
 net/lapb/lapb_subr.c             |   2
 net/llc/af_llc.c                 |   2
 net/llc/llc_conn.c               |   6
 net/netrom/nr_subr.c             |   2
 net/rose/rose_subr.c             |   2
 net/sctp/socket.c                |   4
 net/sctp/ulpqueue.c              |  63
 net/unix/garbage.c               |  12
 net/x25/x25_subr.c               |   2
 29 files changed, 229 insertions(+), 283 deletions(-)
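
What this commit does: it kills the `list' field of struct sk_buff -- the
back-pointer from a queued skb to the sk_buff_head it sits on -- so
skb_unlink(), skb_append() and skb_insert() now take the owning list as an
explicit argument, and every caller below is converted to pass the queue it
already knows about. A minimal before/after sketch of the caller-side change
(illustrative only; `q' is a hypothetical queue, not code from this patch):

    struct sk_buff_head q;      /* the queue this skb was enqueued on */

    /* before: the skb carried a pointer to its own queue */
    skb_unlink(skb);            /* read skb->list internally */

    /* after: the caller names the queue explicitly */
    skb_unlink(skb, &q);

The gain is one pointer less in every sk_buff and one less field to keep
consistent under the queue lock; the cost is that a caller must always know
which list an skb is on before unlinking it.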
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..a0e3bd861f1c 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev *card, int error);
 static scq_info *get_scq(int size, u32 scd);
 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                        u32 handle2, u32 addr2);
+static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(hb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->hbpool.queue, hb);
       card->hbpool.count++;
    }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(lb)->buf_type = BUF_LG;
       skb_queue_tail(&card->lbpool.queue, lb);
       skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+      push_rxbufs(card, lb);
       /* Due to the implementation of push_rxbufs() this is 1, not 0 */
       if (j == 1)
       {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(sb)->buf_type = BUF_SM;
       skb_queue_tail(&card->sbpool.queue, sb);
       skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+      push_rxbufs(card, sb);
    }
    /* Test for strange behaviour which leads to crashes */
    if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(iovb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->iovpool.queue, iovb);
       card->iovpool.count++;
    }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
 
 /* The handles passed must be pointers to the sk_buff containing the small
    or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                        u32 handle2, u32 addr2)
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
+   struct ns_skb_cb *cb = NS_SKB_CB(skb);
+   u32 handle1, addr1;
+   u32 handle2, addr2;
    u32 stat;
    unsigned long flags;
 
+   /* *BARF* */
+   handle2 = addr2 = 0;
+   handle1 = (u32)skb;
+   addr1 = (u32)virt_to_bus(skb->data);
 
 #ifdef GENERAL_DEBUG
    if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
    stat = readl(card->membase + STAT);
    card->sbfqc = ns_stat_sfbqc_get(stat);
    card->lbfqc = ns_stat_lfbqc_get(stat);
-   if (type == BUF_SM)
+   if (cb->buf_type == BUF_SM)
    {
       if (!addr2)
       {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
          }
       }
    }
-   else /* type == BUF_LG */
+   else /* buf_type == BUF_LG */
    {
       if (!addr2)
      {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 
    if (addr2)
    {
-      if (type == BUF_SM)
+      if (cb->buf_type == BUF_SM)
       {
         if (card->sbfqc >= card->sbnr.max)
         {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
            dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
            dev_kfree_skb_any((struct sk_buff *) handle2);
            return;
         }
         else
            card->sbfqc += 2;
      }
-      else /* (type == BUF_LG) */
+      else /* (buf_type == BUF_LG) */
      {
         if (card->lbfqc >= card->lbnr.max)
         {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
            dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
            dev_kfree_skb_any((struct sk_buff *) handle2);
            return;
         }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
       writel(handle2, card->membase + DR2);
       writel(addr1, card->membase + DR1);
       writel(handle1, card->membase + DR0);
-      writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
+      writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
 
       spin_unlock_irqrestore(&card->res_lock, flags);
 
       XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-              (type == BUF_SM ? "small" : "large"), addr1, addr2);
+              (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
    }
 
    if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
            card->efbie = 0;
            break;
         }
+        NS_SKB_CB(sb)->buf_type = BUF_SM;
         skb_queue_tail(&card->sbpool.queue, sb);
         skb_reserve(sb, NS_AAL0_HEADER);
-        push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+        push_rxbufs(card, sb);
      }
      card->sbfqc = i;
      process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
            card->efbie = 0;
            break;
         }
+        NS_SKB_CB(lb)->buf_type = BUF_LG;
         skb_queue_tail(&card->lbpool.queue, lb);
         skb_reserve(lb, NS_SMBUFSIZE);
-        push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+        push_rxbufs(card, lb);
      }
      card->lbfqc = i;
      process_rsq(card);
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
            recycle_rx_buf(card, skb);
            return;
         }
+        NS_SKB_CB(iovb)->buf_type = BUF_NONE;
      }
      else
      if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
         struct sk_buff *new_iovb;
         if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
         {
+           NS_SKB_CB(iovb)->buf_type = BUF_NONE;
            skb_queue_tail(&card->iovpool.queue, new_iovb);
            card->iovpool.count++;
         }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 
    if (NS_SKB(iovb)->iovcnt == 1)
    {
-      if (skb->list != &card->sbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_SM)
      {
         printk("nicstar%d: Expected a small buffer, and this is not one.\n",
                card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
    }
    else /* NS_SKB(iovb)->iovcnt >= 2 */
    {
-      if (skb->list != &card->lbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_LG)
      {
         printk("nicstar%d: Expected a large buffer, and this is not one.\n",
                card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
         /* skb points to a small buffer */
         if (!atm_charge(vcc, skb->truesize))
         {
-            push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                        0, 0);
+            push_rxbufs(card, skb);
            atomic_inc(&vcc->stats->rx_drop);
         }
         else
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
         {
            if (!atm_charge(vcc, sb->truesize))
            {
-               push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                           0, 0);
+               push_rxbufs(card, sb);
               atomic_inc(&vcc->stats->rx_drop);
            }
            else
@@ -2367,16 +2378,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
               atomic_inc(&vcc->stats->rx);
            }
 
-            push_rxbufs(card, BUF_LG, (u32) skb,
-                        (u32) virt_to_bus(skb->data), 0, 0);
+            push_rxbufs(card, skb);
 
         }
         else /* len > NS_SMBUFSIZE, the usual case */
         {
            if (!atm_charge(vcc, skb->truesize))
            {
-               push_rxbufs(card, BUF_LG, (u32) skb,
-                           (u32) virt_to_bus(skb->data), 0, 0);
+               push_rxbufs(card, skb);
               atomic_inc(&vcc->stats->rx_drop);
            }
            else
@@ -2394,8 +2403,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
               atomic_inc(&vcc->stats->rx);
            }
 
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                        0, 0);
+            push_rxbufs(card, sb);
 
        }
 
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                 card->hbpool.count++;
              }
           }
+           NS_SKB_CB(hb)->buf_type = BUF_NONE;
        }
        else
        if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
           struct sk_buff *new_hb;
           if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
           {
+             NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
             skb_queue_tail(&card->hbpool.queue, new_hb);
             card->hbpool.count++;
           }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
        {
           if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
           {
+             NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
             skb_queue_tail(&card->hbpool.queue, new_hb);
             card->hbpool.count++;
           }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
        remaining = len - iov->iov_len;
        iov++;
        /* Free the small buffer */
-        push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                    0, 0);
+        push_rxbufs(card, sb);
 
        /* Copy all large buffers to the huge buffer and free them */
        for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
           skb_put(hb, tocopy);
           iov++;
           remaining -= tocopy;
-           push_rxbufs(card, BUF_LG, (u32) lb,
-                       (u32) virt_to_bus(lb->data), 0, 0);
+           push_rxbufs(card, lb);
        }
 #ifdef EXTRA_DEBUG
        if (remaining != 0 || hb->len != len)
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
      sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
      if (sb == NULL)
         break;
+     NS_SKB_CB(sb)->buf_type = BUF_SM;
      skb_queue_tail(&card->sbpool.queue, sb);
      skb_reserve(sb, NS_AAL0_HEADER);
-     push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+     push_rxbufs(card, sb);
    } while (card->sbfqc < card->sbnr.min);
 }
 
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
      lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
      if (lb == NULL)
         break;
+     NS_SKB_CB(lb)->buf_type = BUF_LG;
      skb_queue_tail(&card->lbpool.queue, lb);
      skb_reserve(lb, NS_SMBUFSIZE);
-     push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+     push_rxbufs(card, lb);
    } while (card->lbfqc < card->lbnr.min);
 }
 
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
      hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
      if (hb == NULL)
         break;
+     NS_SKB_CB(hb)->buf_type = BUF_NONE;
      skb_queue_tail(&card->hbpool.queue, hb);
      card->hbpool.count++;
    }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
 #endif /* NS_USE_DESTRUCTORS */
 
 
-
 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-   if (skb->list == &card->sbpool.queue)
-      push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else if (skb->list == &card->lbpool.queue)
-      push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else
-   {
-      printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-      dev_kfree_skb_any(skb);
-   }
-}
+   struct ns_skb_cb *cb = NS_SKB_CB(skb);
 
+   if (unlikely(cb->buf_type == BUF_NONE)) {
+      printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+      dev_kfree_skb_any(skb);
+   } else
+      push_rxbufs(card, skb);
+}
 
 
 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
-   struct sk_buff *skb;
-
-   for (; count > 0; count--)
-   {
-      skb = (struct sk_buff *) (iov++)->iov_base;
-      if (skb->list == &card->sbpool.queue)
-         push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                     0, 0);
-      else if (skb->list == &card->lbpool.queue)
-         push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
-                     0, 0);
-      else
-      {
-         printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-         dev_kfree_skb_any(skb);
-      }
-   }
+   while (count-- > 0)
+      recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
 }
 
 
-
 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
    if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 
 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-   skb_unlink(sb);
+   skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->sbfqc < card->sbnr.min)
 #else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
      struct sk_buff *new_sb;
      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
      {
+        NS_SKB_CB(new_sb)->buf_type = BUF_SM;
        skb_queue_tail(&card->sbpool.queue, new_sb);
        skb_reserve(new_sb, NS_AAL0_HEADER);
-        push_rxbufs(card, BUF_SM, (u32) new_sb,
-                    (u32) virt_to_bus(new_sb->data), 0, 0);
+        push_rxbufs(card, new_sb);
      }
    }
    if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
      struct sk_buff *new_sb;
      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
      {
+        NS_SKB_CB(new_sb)->buf_type = BUF_SM;
        skb_queue_tail(&card->sbpool.queue, new_sb);
        skb_reserve(new_sb, NS_AAL0_HEADER);
-        push_rxbufs(card, BUF_SM, (u32) new_sb,
-                    (u32) virt_to_bus(new_sb->data), 0, 0);
+        push_rxbufs(card, new_sb);
      }
    }
 }
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 
 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 {
-   skb_unlink(lb);
+   skb_unlink(lb, &card->lbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->lbfqc < card->lbnr.min)
 #else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
      struct sk_buff *new_lb;
      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
      {
+        NS_SKB_CB(new_lb)->buf_type = BUF_LG;
        skb_queue_tail(&card->lbpool.queue, new_lb);
        skb_reserve(new_lb, NS_SMBUFSIZE);
-        push_rxbufs(card, BUF_LG, (u32) new_lb,
-                    (u32) virt_to_bus(new_lb->data), 0, 0);
+        push_rxbufs(card, new_lb);
      }
    }
    if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
      struct sk_buff *new_lb;
      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
      {
+        NS_SKB_CB(new_lb)->buf_type = BUF_LG;
        skb_queue_tail(&card->lbpool.queue, new_lb);
        skb_reserve(new_lb, NS_SMBUFSIZE);
-        push_rxbufs(card, BUF_LG, (u32) new_lb,
-                    (u32) virt_to_bus(new_lb->data), 0, 0);
+        push_rxbufs(card, new_lb);
      }
    }
 }
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
            sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
            if (sb == NULL)
               return -ENOMEM;
+           NS_SKB_CB(sb)->buf_type = BUF_SM;
            skb_queue_tail(&card->sbpool.queue, sb);
            skb_reserve(sb, NS_AAL0_HEADER);
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+            push_rxbufs(card, sb);
         }
         break;
 
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
            lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
            if (lb == NULL)
               return -ENOMEM;
+           NS_SKB_CB(lb)->buf_type = BUF_LG;
            skb_queue_tail(&card->lbpool.queue, lb);
            skb_reserve(lb, NS_SMBUFSIZE);
-            push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+            push_rxbufs(card, lb);
         }
         break;
 
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
            hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
            if (hb == NULL)
               return -ENOMEM;
+           NS_SKB_CB(hb)->buf_type = BUF_NONE;
            ns_grab_int_lock(card, flags);
            skb_queue_tail(&card->hbpool.queue, hb);
            card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
            iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
            if (iovb == NULL)
               return -ENOMEM;
+           NS_SKB_CB(iovb)->buf_type = BUF_NONE;
            ns_grab_int_lock(card, flags);
            skb_queue_tail(&card->iovpool.queue, iovb);
            card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 }
 
 
-
 static void which_list(ns_dev *card, struct sk_buff *skb)
 {
-   printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
-          "small" : skb->list == &card->lbpool.queue ? "large" :
-          skb->list == &card->hbpool.queue ? "huge" :
-          skb->list == &card->iovpool.queue ? "iovec" : "unknown");
+   printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
 }
 
 
-
 static void ns_poll(unsigned long arg)
 {
    int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -103,8 +103,14 @@
 
 #define NS_IOREMAP_SIZE 4096
 
-#define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */
-#define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
+/*
+ * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
+ * BUF_SM and BUF_LG are both used by the driver and the device.
+ * BUF_NONE is only used by the driver.
+ */
+#define BUF_SM   0x00000000 /* These two are used for push_rxbufs() */
+#define BUF_LG   0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */
+#define BUF_NONE 0xffffffff /* Software only: */
 
 #define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */
 #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
 /* Device driver structures ***************************************************/
 
 
+struct ns_skb_cb {
+   u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
+};
+
+#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
+
 typedef struct tsq_info
 {
    void *org;
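
With skb->list gone, nicstar can no longer classify a buffer by testing which
pool queue it is linked on (skb->list == &card->sbpool.queue), so the driver
tags every buffer at allocation time via the new ns_skb_cb kept in the
generic skb->cb[] control-block area. A sketch of the pattern under the
definitions above (hypothetical snippet, not a hunk from this patch):

    struct sk_buff *sb = dev_alloc_skb(NS_SMSKBSIZE);

    if (sb != NULL) {
        NS_SKB_CB(sb)->buf_type = BUF_SM;   /* tag once, at allocation */
        skb_queue_tail(&card->sbpool.queue, sb);
        skb_reserve(sb, NS_AAL0_HEADER);
        push_rxbufs(card, sb);              /* reads the tag back */
    }

BUF_NONE marks buffers (iovec and huge buffers) that are never handed to the
hardware free-buffer queues, which is why recycle_rx_buf() treats it as an
error.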
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..85fee9530fa9 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 		chan = (here[3] & uPD98401_AAL5_CHAN) >>
 		    uPD98401_AAL5_CHAN_SHIFT;
 		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
+			int pos = ZATM_VCC(vcc)->pool;
+
 			vcc = zatm_dev->rx_map[chan];
-			if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool])
-				zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL;
-			skb_unlink(skb);
+			if (skb == zatm_dev->last_free[pos])
+				zatm_dev->last_free[pos] = NULL;
+			skb_unlink(skb, zatm_dev->pool + pos);
 		}
 		else {
 			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5..e8d2a340356d 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
 	if (err) {
 		BT_ERR("%s bulk tx submit failed urb %p err %d",
 		       bfusb->hdev->name, urb, err);
-		skb_unlink(skb);
+		skb_unlink(skb, &bfusb->pending_q);
 		usb_free_urb(urb);
 	} else
 		atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
 
 	read_lock(&bfusb->lock);
 
-	skb_unlink(skb);
+	skb_unlink(skb, &bfusb->pending_q);
 	skb_queue_tail(&bfusb->completed_q, skb);
 
 	bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
 	if (err) {
 		BT_ERR("%s bulk rx submit failed urb %p err %d",
 		       bfusb->hdev->name, urb, err);
-		skb_unlink(skb);
+		skb_unlink(skb, &bfusb->pending_q);
 		kfree_skb(skb);
 		usb_free_urb(urb);
 	}
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
 		buf += len;
 	}
 
-	skb_unlink(skb);
+	skb_unlink(skb, &bfusb->pending_q);
 	kfree_skb(skb);
 
 	bfusb_rx_submit(bfusb, urb);
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4..d633770fac8e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
 		return;
 	}
 
-	__skb_unlink(skb, skb->list);
+	__skb_unlink(skb, &host->pending_packet_queue);
 
 	if (packet->state == hpsb_queued) {
 		packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
 		packet = (struct hpsb_packet *)skb->data;
 
 		if (time_before(packet->sendtime + expire, jiffies)) {
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &host->pending_packet_queue);
 			packet->state = hpsb_complete;
 			packet->ack_code = ACKX_TIMEOUT;
 			queue_packet_complete(packet);
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f983..6ae6eb322111 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
 		if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
 		    (m->msg.data_b3_req.blocknr == blocknr)) {
 			/* found corresponding DATA_B3_REQ */
-			skb_unlink(tmp);
+			skb_unlink(tmp, &card->ackq);
 			chan->queued -= m->msg.data_b3_req.datalen;
 			if (m->msg.data_b3_req.flags)
 				ret = m->msg.data_b3_req.datalen;
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6f..221354eea21f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
 
-#ifdef SHAPER_COMPLEX /* and broken.. */
-
-	while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
-	{
-		if(ptr->pri<skb->pri
-		   && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
-		{
-			struct sk_buff *tmp=ptr->prev;
-
-			/*
-			 * It goes before us therefore we slip the length
-			 * of the new frame.
-			 */
-
-			SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
-			SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
-
-			/*
-			 * The packet may have slipped so far back it
-			 * fell off.
-			 */
-			if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
-			{
-				skb_unlink(ptr);
-				dev_kfree_skb(ptr);
-			}
-			ptr=tmp;
-		}
-		else
-			break;
-	}
-	if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
-		skb_queue_head(&shaper->sendq,skb);
-	else
-	{
-		struct sk_buff *tmp;
-		/*
-		 * Set the packet clock out time according to the
-		 * frames ahead. Im sure a bit of thought could drop
-		 * this loop.
-		 */
-		for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
-			SHAPERCB(skb)->shapeclock+=tmp->shapelen;
-		skb_append(ptr,skb);
-	}
-#else
 	{
 		struct sk_buff *tmp;
 		/*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		} else
 			skb_queue_tail(&shaper->sendq, skb);
 	}
-#endif
+
 	if(sh_debug)
 		printk("Frame queued.\n");
 	if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
 	 * Pull the frame and get interrupts back on.
	 */
 
-	skb_unlink(skb);
+	skb_unlink(skb, &shaper->sendq);
 	if (shaper->recovery <
 	    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
 		shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b..0497dbdb8631 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
 void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
 
 unsigned short calc_checksum (char *, int);
-static int setup_fr_header(struct sk_buff** skb,
+static int setup_fr_header(struct sk_buff *skb,
 			   struct net_device* dev, char op_mode);
 
 
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
 		/* Move the if_header() code to here. By inserting frame
 		 * relay header in if_header() we would break the
 		 * tcpdump and other packet sniffers */
-		chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
+		chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
 		if (chan->fr_header_len < 0 ){
 			++chan->ifstats.tx_dropped;
 			++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
 		return 1;
 	}
 
-	skb_unlink(skb);
-
 	chan->transmit_length = len;
 	chan->delay_skb = skb;
 
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
 	}
 }
 
-static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
+static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
 			   char op_mode)
 {
-	struct sk_buff *skb = *skb_orig;
 	fr_channel_t *chan=dev->priv;
 
-	if (op_mode == WANPIPE){
-
+	if (op_mode == WANPIPE) {
 		chan->fr_header[0]=Q922_UI;
 
 		switch (htons(skb->protocol)){
-
 		case ETH_P_IP:
 			chan->fr_header[1]=NLPID_IP;
 			break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 	}
 
 	/* If we are in bridging mode, we must apply
-	 * an Ethernet header */
-	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
-
-
+	 * an Ethernet header
+	 */
+	if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {
 		/* Encapsulate the packet as a bridged Ethernet frame. */
 #ifdef DEBUG
 		printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
 			dev->name);
 #endif
-
 		chan->fr_header[0] = 0x03;
 		chan->fr_header[1] = 0x00;
 		chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
 		/* Yuck. */
 		skb->protocol = ETH_P_802_3;
 		return 8;
-
 	}
 
 	return 0;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b0..a2f67245f6da 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
  * completion callbacks. 2.5 should have fixed those bugs...
  */
 
-static void defer_bh (struct usbnet *dev, struct sk_buff *skb)
+static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
 	unsigned long flags;
 
-	spin_lock_irqsave (&list->lock, flags);
-	__skb_unlink (skb, list);
-	spin_unlock (&list->lock);
-	spin_lock (&dev->done.lock);
-	__skb_queue_tail (&dev->done, skb);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock(&list->lock);
+	spin_lock(&dev->done.lock);
+	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule (&dev->bh);
-	spin_unlock_irqrestore (&dev->done.lock, flags);
+		tasklet_schedule(&dev->bh);
+	spin_unlock_irqrestore(&dev->done.lock, flags);
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ block:
 		break;
 	}
 
-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->rxq);
 
 	if (urb) {
 		if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
 
 	urb->dev = NULL;
 	entry->state = tx_done;
-	defer_bh (dev, skb);
+	defer_bh(dev, skb, &dev->txq);
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4b929c3c1a98..76c68851474c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -204,7 +204,6 @@ struct sk_buff {
 	struct sk_buff *next;
 	struct sk_buff *prev;
 
-	struct sk_buff_head *list;
 	struct sock *sk;
 	struct timeval stamp;
 	struct net_device *dev;
@@ -597,7 +596,6 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;
 
-	newsk->list = list;
 	list->qlen++;
 	prev = (struct sk_buff *)list;
 	next = prev->next;
@@ -622,7 +620,6 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
 {
 	struct sk_buff *prev, *next;
 
-	newsk->list = list;
 	list->qlen++;
 	next = (struct sk_buff *)list;
 	prev = next->prev;
@@ -655,7 +652,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 		next->prev = prev;
 		prev->next = next;
 		result->next = result->prev = NULL;
-		result->list = NULL;
 	}
 	return result;
 }
@@ -664,7 +660,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 /*
  * Insert a packet on a list.
  */
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
@@ -672,24 +668,23 @@ static inline void __skb_insert(struct sk_buff *newsk,
 	newsk->next = next;
 	newsk->prev = prev;
 	next->prev = prev->next = newsk;
-	newsk->list = list;
 	list->qlen++;
 }
 
 /*
  * Place a packet after a given packet in a list.
 */
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
-	__skb_insert(newsk, old, old->next, old->list);
+	__skb_insert(newsk, old, old->next, list);
 }
 
 /*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
-extern void skb_unlink(struct sk_buff *skb);
+extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;
@@ -698,7 +693,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next = skb->next;
 	prev = skb->prev;
 	skb->next = skb->prev = NULL;
-	skb->list = NULL;
 	next->prev = prev;
 	prev->next = next;
 }
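
The inline helpers now thread the owning queue through every operation
instead of reading it from the skb. A minimal sketch of the post-patch
inline API (hypothetical queue; a and b are assumed already allocated with
alloc_skb()):

    struct sk_buff_head q;
    struct sk_buff *a, *b;

    skb_queue_head_init(&q);
    __skb_queue_tail(&q, a);    /* no longer sets a->list */
    __skb_append(a, b, &q);     /* queue passed explicitly */
    __skb_unlink(b, &q);        /* caller must name the right queue */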
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 181a3002d8ad..4b1faca5013f 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -34,7 +34,6 @@
 
 void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 {
-	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff *skb_from = (struct sk_buff *) from;
 	struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 	prev->next = skb_to;
 	to->prev->next = from->next;
 	to->prev = from->prev;
-	for (skb = from->next; skb != skb_to; skb = skb->next)
-		skb->list = to;
 	to->qlen += from->qlen;
 	spin_unlock(&to->lock);
 	from->prev = skb_from;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 99694b57f6f5..eb7343c10a9f 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
 		if (skb_prev == NULL)
 			skb_queue_head(&ax25->write_queue, skb);
 		else
-			skb_append(skb_prev, skb);
+			skb_append(skb_prev, skb, &ax25->write_queue);
 		skb_prev = skb;
 	}
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d9..e6564b0a6839 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
 	C(stamp);
 	C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list = NULL;
 	new->sk = NULL;
 	new->dev = old->dev;
 	new->real_dev = old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  *	skb_unlink	-	remove a buffer from a list
  *	@skb: buffer to remove
+ *	@list: list to use
  *
- *	Place a packet after a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
+ *	Remove a packet from a list. The list locks are taken and this
+ *	function is atomic with respect to other list locked calls
 *
- *	Works even without knowing the list it is sitting on, which can be
- *	handy at times. It also means that THE LIST MUST EXIST when you
- *	unlink. Thus a list must have its contents unlinked before it is
- *	destroyed.
+ *	You must know what list the SKB is on.
 */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;
 
-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  *	skb_append	-	append a buffer
  *	@old: buffer to insert after
  *	@newsk: buffer to insert
+ *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
+ *	@list: list to use
+ *
+ *	Place a packet before a given packet in a list. The list locks are
+ *	taken and this function is atomic with respect to other list locked
+ *	calls.
 *
- *	Place a packet before a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
 *	A buffer cannot be placed on two lists at the same time.
 */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
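
Note the semantic change in the locked skb_unlink(): the old version
re-checked skb->list under the queue lock and quietly did nothing if the skb
had already been removed, while the new version unconditionally unlinks from
whatever list the caller names. A caller that can race with another unlinker
now has to re-check linkage itself, along these lines (hypothetical caller;
relies on __skb_unlink() above resetting next/prev to NULL):

    spin_lock_irqsave(&q.lock, flags);
    if (skb->next != NULL)              /* still on the queue? */
        __skb_unlink(skb, &q);
    spin_unlock_irqrestore(&q.lock, flags);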
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acdd18e6adb2..0c30409fe9e5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		nskb = skb->next;
 
 		if (skb->len == 0) {
-			skb_unlink(skb);
+			skb_unlink(skb, queue);
 			kfree_skb(skb);
 			/*
 			 * N.B. Don't refer to skb or cb after this point
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8cce1fdbda90..e0bebf4bbcad 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
 		xmit_count = cb2->xmit_count;
 		segnum = cb2->segnum;
 		/* Remove and drop ack'ed packet */
-		skb_unlink(ack);
+		skb_unlink(ack, q);
 		kfree_skb(ack);
 		ack = NULL;
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index de691e119e17..b807a314269e 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
 
 foundit:
 	tx_result(skb->sk, eb->cookie, result);
-	skb_unlink(skb);
+	skb_unlink(skb, &aun_queue);
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 	kfree_skb(skb);
 }
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
 		{
 			tx_result(skb->sk, eb->cookie,
 				  ECTYPE_TRANSMIT_NOT_PRESENT);
-			skb_unlink(skb);
+			skb_unlink(skb, &aun_queue);
 			kfree_skb(skb);
 		}
 		skb = newskb;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 69b1fcf70077..d2696af46c70 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -975,7 +975,7 @@ do_fault:
 	if (!skb->len) {
 		if (sk->sk_send_head == skb)
 			sk->sk_send_head = NULL;
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &sk->sk_write_queue);
 		sk_stream_free_skb(sk, skb);
 	}
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..ffa24025cd02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 			seq_rtt = now - scb->when;
 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
 		tcp_packets_out_dec(tp, skb);
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &sk->sk_write_queue);
 		sk_stream_free_skb(sk, skb);
 	}
 
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 			SOCK_DEBUG(sk, "ofo packet was already received \n");
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, &tp->out_of_order_queue);
 			__kfree_skb(skb);
 			continue;
 		}
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
 			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
 			   TCP_SKB_CB(skb)->end_seq);
 
-		__skb_unlink(skb, skb->list);
+		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
 		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 		if (seq == TCP_SKB_CB(skb1)->end_seq) {
-			__skb_append(skb1, skb);
+			__skb_append(skb1, skb, &tp->out_of_order_queue);
 
 			if (!tp->rx_opt.num_sacks ||
 			    tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
 				tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
 				break;
 			}
-			__skb_unlink(skb1, skb1->list);
+			__skb_unlink(skb1, &tp->out_of_order_queue);
 			tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
 			__kfree_skb(skb1);
 		}
@@ -3088,8 +3088,9 @@ add_sack:
  * simplifies code)
 */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
-	     struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+	     struct sk_buff *head, struct sk_buff *tail,
+	     u32 start, u32 end)
 {
 	struct sk_buff *skb;
 
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 		/* No new bits? It is possible on ofo queue. */
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 			struct sk_buff *next = skb->next;
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, list);
 			__kfree_skb(skb);
 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 		nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-		__skb_insert(nskb, skb->prev, skb, skb->list);
+		__skb_insert(nskb, skb->prev, skb, list);
 		sk_stream_set_owner_r(nskb, sk);
 
 		/* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
 		}
 		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
 			struct sk_buff *next = skb->next;
-			__skb_unlink(skb, skb->list);
+			__skb_unlink(skb, list);
 			__kfree_skb(skb);
 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, head, skb, start, end);
+			tcp_collapse(sk, &tp->out_of_order_queue,
+				     head, skb, start, end);
 			head = skb;
 			if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3206 break; 3208 break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
3237 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 3239 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3238 3240
3239 tcp_collapse_ofo_queue(sk); 3241 tcp_collapse_ofo_queue(sk);
3240 tcp_collapse(sk, sk->sk_receive_queue.next, 3242 tcp_collapse(sk, &sk->sk_receive_queue,
3243 sk->sk_receive_queue.next,
3241 (struct sk_buff*)&sk->sk_receive_queue, 3244 (struct sk_buff*)&sk->sk_receive_queue,
3242 tp->copied_seq, tp->rcv_nxt); 3245 tp->copied_seq, tp->rcv_nxt);
3243 sk_stream_mem_reclaim(sk); 3246 sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3462 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 3465 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
3463 tp->copied_seq++; 3466 tp->copied_seq++;
3464 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 3467 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3465 __skb_unlink(skb, skb->list); 3468 __skb_unlink(skb, &sk->sk_receive_queue);
3466 __kfree_skb(skb); 3469 __kfree_skb(skb);
3467 } 3470 }
3468 } 3471 }
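tcp_collapse() used to recover the queue from skb->list on every unlink; the extra sk_buff_head argument makes the target queue explicit, so the one helper now serves both callers. The two calling conventions, restated from the hunks above:

	/* Collapse a run of the out-of-order queue... */
	tcp_collapse(sk, &tp->out_of_order_queue, head, skb, start, end);

	/* ...and the whole receive queue up to rcv_nxt. */
	tcp_collapse(sk, &sk->sk_receive_queue,
		     sk->sk_receive_queue.next,
		     (struct sk_buff *)&sk->sk_receive_queue,
		     tp->copied_seq, tp->rcv_nxt);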
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74..a4d1eb9a0926 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -505,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
505 505
506 /* Link BUFF into the send queue. */ 506 /* Link BUFF into the send queue. */
507 skb_header_release(buff); 507 skb_header_release(buff);
508 __skb_append(skb, buff); 508 __skb_append(skb, buff, &sk->sk_write_queue);
509 509
510 return 0; 510 return 0;
511} 511}
@@ -893,7 +893,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
893 893
894 /* Link BUFF into the send queue. */ 894 /* Link BUFF into the send queue. */
895 skb_header_release(buff); 895 skb_header_release(buff);
896 __skb_append(skb, buff); 896 __skb_append(skb, buff, &sk->sk_write_queue);
897 897
898 return 0; 898 return 0;
899} 899}
@@ -1238,7 +1238,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1238 tcp_skb_pcount(next_skb) != 1); 1238 tcp_skb_pcount(next_skb) != 1);
1239 1239
1240 /* Ok. We will be able to collapse the packet. */ 1240 /* Ok. We will be able to collapse the packet. */
1241 __skb_unlink(next_skb, next_skb->list); 1241 __skb_unlink(next_skb, &sk->sk_write_queue);
1242 1242
1243 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); 1243 memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
1244 1244
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 6dafbb43b529..eb65b4925b51 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
988 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 988 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
989 return; 989 return;
990 } 990 }
991 /* Unlink tx_skb from list */
992 tx_skb->next = tx_skb->prev = NULL;
993 tx_skb->list = NULL;
994 991
995 /* Clear old Nr field + poll bit */ 992 /* Clear old Nr field + poll bit */
996 tx_skb->data[1] &= 0x0f; 993 tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
1063 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); 1060 IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
1064 return; 1061 return;
1065 } 1062 }
1066 /* Unlink tx_skb from list */
1067 tx_skb->next = tx_skb->prev = NULL;
1068 tx_skb->list = NULL;
1069 1063
1070 /* Clear old Nr field + poll bit */ 1064 /* Clear old Nr field + poll bit */
1071 tx_skb->data[1] &= 0x0f; 1065 tx_skb->data[1] &= 0x0f;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 5de05a0bc0ff..8b5eefd70f03 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
78 if (!skb_prev) 78 if (!skb_prev)
79 skb_queue_head(&lapb->write_queue, skb); 79 skb_queue_head(&lapb->write_queue, skb);
80 else 80 else
81 skb_append(skb_prev, skb); 81 skb_append(skb_prev, skb, &lapb->write_queue);
82 skb_prev = skb; 82 skb_prev = skb;
83 } 83 }
84} 84}
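lapb here, and netrom, rose, and x25 below, all share the same requeue idiom and each gains the queue argument to skb_append(). Reduced to a generic sketch (the helper name and the src/dst split are illustrative; each protocol really moves frames from its ack queue back to its write queue):

static void requeue_frames(struct sk_buff_head *src, struct sk_buff_head *dst)
{
	struct sk_buff *skb, *skb_prev = NULL;

	while ((skb = skb_dequeue(src)) != NULL) {
		if (!skb_prev)
			skb_queue_head(dst, skb);
		else
			/* The destination queue must now be named, since
			 * skb no longer remembers which list it is on. */
			skb_append(skb_prev, skb, dst);
		skb_prev = skb;
	}
}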
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 20b4cfebd74c..f49b82da8264 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
714 if (uaddr) 714 if (uaddr)
715 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 715 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
716 msg->msg_namelen = sizeof(*uaddr); 716 msg->msg_namelen = sizeof(*uaddr);
717 if (!skb->list) { 717 if (!skb->next) {
718dgram_free: 718dgram_free:
719 kfree_skb(skb); 719 kfree_skb(skb);
720 } 720 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index eba812a9c69c..571548619469 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
71 71
72 if (!ev->ind_prim && !ev->cfm_prim) { 72 if (!ev->ind_prim && !ev->cfm_prim) {
73 /* indicate or confirm not required */ 73 /* indicate or confirm not required */
74 if (!skb->list) 74 /* XXX this is not very pretty, perhaps we should store
75 * XXX indicate/confirm-needed state in the llc_conn_state_ev
76 * XXX control block of the SKB instead? -DaveM
77 */
78 if (!skb->next)
75 goto out_kfree_skb; 79 goto out_kfree_skb;
76 goto out_skb_put; 80 goto out_skb_put;
77 } 81 }
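The new comment above spells out why the !skb->next test works: an skb that has been through __skb_unlink() (or was never queued at all) has a NULL next pointer. As a named helper, the membership test would be (illustrative, not part of the patch):

static inline int skb_is_queued(const struct sk_buff *skb)
{
	/* Non-NULL next means the skb still sits on some queue. */
	return skb->next != NULL;
}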
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 0627347b14b8..252c1b3ecd78 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
77 if (skb_prev == NULL) 77 if (skb_prev == NULL)
78 skb_queue_head(&sk->sk_write_queue, skb); 78 skb_queue_head(&sk->sk_write_queue, skb);
79 else 79 else
80 skb_append(skb_prev, skb); 80 skb_append(skb_prev, skb, &sk->sk_write_queue);
81 skb_prev = skb; 81 skb_prev = skb;
82 } 82 }
83} 83}
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7db7e1cedc3a..ae135e27799b 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
74 if (skb_prev == NULL) 74 if (skb_prev == NULL)
75 skb_queue_head(&sk->sk_write_queue, skb); 75 skb_queue_head(&sk->sk_write_queue, skb);
76 else 76 else
77 skb_append(skb_prev, skb); 77 skb_append(skb_prev, skb, &sk->sk_write_queue);
78 skb_prev = skb; 78 skb_prev = skb;
79 } 79 }
80} 80}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 091a66f06a35..4454afe4727e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4892 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 4892 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
4893 event = sctp_skb2event(skb); 4893 event = sctp_skb2event(skb);
4894 if (event->asoc == assoc) { 4894 if (event->asoc == assoc) {
4895 __skb_unlink(skb, skb->list); 4895 __skb_unlink(skb, &oldsk->sk_receive_queue);
4896 __skb_queue_tail(&newsk->sk_receive_queue, skb); 4896 __skb_queue_tail(&newsk->sk_receive_queue, skb);
4897 } 4897 }
4898 } 4898 }
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4921 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 4921 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
4922 event = sctp_skb2event(skb); 4922 event = sctp_skb2event(skb);
4923 if (event->asoc == assoc) { 4923 if (event->asoc == assoc) {
4924 __skb_unlink(skb, skb->list); 4924 __skb_unlink(skb, &oldsp->pd_lobby);
4925 __skb_queue_tail(queue, skb); 4925 __skb_queue_tail(queue, skb);
4926 } 4926 }
4927 } 4927 }
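Both migrate hunks are the same move-an-skb-between-queues step. A hypothetical helper capturing it (the name is invented; callers must hold the locks covering both queues, as sctp_sock_migrate() does here):

static inline void skb_move_tail(struct sk_buff *skb,
				 struct sk_buff_head *from,
				 struct sk_buff_head *to)
{
	__skb_unlink(skb, from);	/* lockless: locks held by caller */
	__skb_queue_tail(to, skb);
}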
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99..ec2c857eae7f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -50,9 +50,9 @@
50 50
51/* Forward declarations for internal helpers. */ 51/* Forward declarations for internal helpers. */
52static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq, 52static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
53 struct sctp_ulpevent *); 53 struct sctp_ulpevent *);
54static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *, 54static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
55 struct sctp_ulpevent *); 55 struct sctp_ulpevent *);
56 56
57/* 1st Level Abstractions */ 57/* 1st Level Abstractions */
58 58
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
125 event = sctp_ulpq_order(ulpq, event); 125 event = sctp_ulpq_order(ulpq, event);
126 } 126 }
127 127
128 /* Send event to the ULP. */ 128 /* Send event to the ULP. 'event' is the sctp_ulpevent for
129 * very first SKB on the 'temp' list.
130 */
129 if (event) 131 if (event)
130 sctp_ulpq_tail_event(ulpq, event); 132 sctp_ulpq_tail_event(ulpq, event);
131 133
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
158 return sctp_clear_pd(ulpq->asoc->base.sk); 160 return sctp_clear_pd(ulpq->asoc->base.sk);
159} 161}
160 162
161 163/* If the SKB of 'event' is on a list, it is the first such member
162 164 * of that list.
165 */
163int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) 166int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
164{ 167{
165 struct sock *sk = ulpq->asoc->base.sk; 168 struct sock *sk = ulpq->asoc->base.sk;
166 struct sk_buff_head *queue; 169 struct sk_buff_head *queue, *skb_list;
170 struct sk_buff *skb = sctp_event2skb(event);
167 int clear_pd = 0; 171 int clear_pd = 0;
168 172
173 skb_list = (struct sk_buff_head *) skb->prev;
174
169 /* If the socket is just going to throw this away, do not 175 /* If the socket is just going to throw this away, do not
170 * even try to deliver it. 176 * even try to deliver it.
171 */ 177 */
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
197 /* If we are harvesting multiple skbs they will be 203 /* If we are harvesting multiple skbs they will be
198 * collected on a list. 204 * collected on a list.
199 */ 205 */
200 if (sctp_event2skb(event)->list) 206 if (skb_list)
201 sctp_skb_list_tail(sctp_event2skb(event)->list, queue); 207 sctp_skb_list_tail(skb_list, queue);
202 else 208 else
203 __skb_queue_tail(queue, sctp_event2skb(event)); 209 __skb_queue_tail(queue, skb);
204 210
205 /* Did we just complete partial delivery and need to get 211 /* Did we just complete partial delivery and need to get
206 * rolling again? Move pending data to the receive 212 * rolling again? Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
214 return 1; 220 return 1;
215 221
216out_free: 222out_free:
217 if (sctp_event2skb(event)->list) 223 if (skb_list)
218 sctp_queue_purge_ulpevents(sctp_event2skb(event)->list); 224 sctp_queue_purge_ulpevents(skb_list);
219 else 225 else
220 sctp_ulpevent_free(event); 226 sctp_ulpevent_free(event);
227
221 return 0; 228 return 0;
222} 229}
223 230
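The (struct sk_buff_head *) cast of skb->prev above leans on two standing invariants: the queues are circular, so the first skb's prev points back at the head, and sk_buff_head deliberately opens with the same next/prev pair as sk_buff. The era's layout, for reference:

struct sk_buff_head {
	/* These two members must be first: they mirror sk_buff's own
	 * next/prev so the head can sit on the ring like an skb. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};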
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
269 * payload was fragmented on the way and ip had to reassemble them. 276 * payload was fragmented on the way and ip had to reassemble them.
270 * We add the rest of skb's to the first skb's fraglist. 277 * We add the rest of skb's to the first skb's fraglist.
271 */ 278 */
272static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag) 279static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
273{ 280{
274 struct sk_buff *pos; 281 struct sk_buff *pos;
275 struct sctp_ulpevent *event; 282 struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
294 skb_shinfo(f_frag)->frag_list = pos; 301 skb_shinfo(f_frag)->frag_list = pos;
295 302
296 /* Remove the first fragment from the reassembly queue. */ 303 /* Remove the first fragment from the reassembly queue. */
297 __skb_unlink(f_frag, f_frag->list); 304 __skb_unlink(f_frag, queue);
298 while (pos) { 305 while (pos) {
299 306
300 pnext = pos->next; 307 pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
304 f_frag->data_len += pos->len; 311 f_frag->data_len += pos->len;
305 312
306 /* Remove the fragment from the reassembly queue. */ 313 /* Remove the fragment from the reassembly queue. */
307 __skb_unlink(pos, pos->list); 314 __skb_unlink(pos, queue);
308 315
309 /* Break if we have reached the last fragment. */ 316 /* Break if we have reached the last fragment. */
310 if (pos == l_frag) 317 if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
375done: 382done:
376 return retval; 383 return retval;
377found: 384found:
378 retval = sctp_make_reassembled_event(first_frag, pos); 385 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
379 if (retval) 386 if (retval)
380 retval->msg_flags |= MSG_EOR; 387 retval->msg_flags |= MSG_EOR;
381 goto done; 388 goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
435 * further. 442 * further.
436 */ 443 */
437done: 444done:
438 retval = sctp_make_reassembled_event(first_frag, last_frag); 445 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
439 if (retval && is_last) 446 if (retval && is_last)
440 retval->msg_flags |= MSG_EOR; 447 retval->msg_flags |= MSG_EOR;
441 448
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
527 * further. 534 * further.
528 */ 535 */
529done: 536done:
530 retval = sctp_make_reassembled_event(first_frag, last_frag); 537 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
531 return retval; 538 return retval;
532} 539}
533 540
@@ -537,6 +544,7 @@ done:
537static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, 544static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
538 struct sctp_ulpevent *event) 545 struct sctp_ulpevent *event)
539{ 546{
547 struct sk_buff_head *event_list;
540 struct sk_buff *pos, *tmp; 548 struct sk_buff *pos, *tmp;
541 struct sctp_ulpevent *cevent; 549 struct sctp_ulpevent *cevent;
542 struct sctp_stream *in; 550 struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
547 ssn = event->ssn; 555 ssn = event->ssn;
548 in = &ulpq->asoc->ssnmap->in; 556 in = &ulpq->asoc->ssnmap->in;
549 557
558 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
559
550 /* We are holding the chunks by stream, by SSN. */ 560 /* We are holding the chunks by stream, by SSN. */
551 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { 561 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
552 cevent = (struct sctp_ulpevent *) pos->cb; 562 cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
567 /* Found it, so mark in the ssnmap. */ 577 /* Found it, so mark in the ssnmap. */
568 sctp_ssn_next(in, sid); 578 sctp_ssn_next(in, sid);
569 579
570 __skb_unlink(pos, pos->list); 580 __skb_unlink(pos, &ulpq->lobby);
571 581
572 /* Attach all gathered skbs to the event. */ 582 /* Attach all gathered skbs to the event. */
573 __skb_queue_tail(sctp_event2skb(event)->list, pos); 583 __skb_queue_tail(event_list, pos);
574 } 584 }
575} 585}
576 586
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
626} 636}
627 637
628static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, 638static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
629 struct sctp_ulpevent *event) 639 struct sctp_ulpevent *event)
630{ 640{
631 __u16 sid, ssn; 641 __u16 sid, ssn;
632 struct sctp_stream *in; 642 struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
667{ 677{
668 struct sk_buff *pos, *tmp; 678 struct sk_buff *pos, *tmp;
669 struct sctp_ulpevent *cevent; 679 struct sctp_ulpevent *cevent;
670 struct sctp_ulpevent *event = NULL; 680 struct sctp_ulpevent *event;
671 struct sctp_stream *in; 681 struct sctp_stream *in;
672 struct sk_buff_head temp; 682 struct sk_buff_head temp;
673 __u16 csid, cssn; 683 __u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
675 in = &ulpq->asoc->ssnmap->in; 685 in = &ulpq->asoc->ssnmap->in;
676 686
677 /* We are holding the chunks by stream, by SSN. */ 687 /* We are holding the chunks by stream, by SSN. */
688 skb_queue_head_init(&temp);
689 event = NULL;
678 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { 690 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
679 cevent = (struct sctp_ulpevent *) pos->cb; 691 cevent = (struct sctp_ulpevent *) pos->cb;
680 csid = cevent->stream; 692 csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
686 /* Found it, so mark in the ssnmap. */ 698 /* Found it, so mark in the ssnmap. */
687 sctp_ssn_next(in, csid); 699 sctp_ssn_next(in, csid);
688 700
689 __skb_unlink(pos, pos->list); 701 __skb_unlink(pos, &ulpq->lobby);
690 if (!event) { 702 if (!event) {
691 /* Create a temporary list to collect chunks on. */ 703 /* Create a temporary list to collect chunks on. */
692 event = sctp_skb2event(pos); 704 event = sctp_skb2event(pos);
693 skb_queue_head_init(&temp);
694 __skb_queue_tail(&temp, sctp_event2skb(event)); 705 __skb_queue_tail(&temp, sctp_event2skb(event));
695 } else { 706 } else {
696 /* Attach all gathered skbs to the event. */ 707 /* Attach all gathered skbs to the event. */
697 __skb_queue_tail(sctp_event2skb(event)->list, pos); 708 __skb_queue_tail(&temp, pos);
698 } 709 }
699 } 710 }
700 711
701 /* Send event to the ULP. */ 712 /* Send event to the ULP. 'event' is the sctp_ulpevent for
713 * very first SKB on the 'temp' list.
714 */
702 if (event) 715 if (event)
703 sctp_ulpq_tail_event(ulpq, event); 716 sctp_ulpq_tail_event(ulpq, event);
704} 717}
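With skb->list gone, sctp_ulpq_reap_ordered() can no longer queue follow-up skbs onto sctp_event2skb(event)->list, so 'temp' is now initialized once, before the walk, and used directly. The gathering pattern, simplified into a sketch (the helper and its single-stream predicate are illustrative; the real loop walks all streams in SSN order):

static void gather_in_order(struct sctp_ulpq *ulpq, struct sk_buff_head *temp,
			    __u16 sid)
{
	struct sctp_stream *in = &ulpq->asoc->ssnmap->in;
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *tmp;

	skb_queue_head_init(temp);		/* init before any unlink */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		if (cevent->stream != sid ||
		    cevent->ssn != sctp_ssn_peek(in, sid))
			continue;
		sctp_ssn_next(in, sid);			/* mark delivered */
		__skb_unlink(pos, &ulpq->lobby);	/* queue is explicit */
		__skb_queue_tail(temp, pos);		/* not pos->list */
	}
}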
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 4bd95c8f5934..46252d2807bb 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -286,16 +286,16 @@ void unix_gc(void)
286 skb = skb_peek(&s->sk_receive_queue); 286 skb = skb_peek(&s->sk_receive_queue);
287 while (skb && 287 while (skb &&
288 skb != (struct sk_buff *)&s->sk_receive_queue) { 288 skb != (struct sk_buff *)&s->sk_receive_queue) {
289 nextsk=skb->next; 289 nextsk = skb->next;
290 /* 290 /*
291 * Do we have file descriptors ? 291 * Do we have file descriptors ?
292 */ 292 */
293 if(UNIXCB(skb).fp) 293 if (UNIXCB(skb).fp) {
294 { 294 __skb_unlink(skb,
295 __skb_unlink(skb, skb->list); 295 &s->sk_receive_queue);
296 __skb_queue_tail(&hitlist,skb); 296 __skb_queue_tail(&hitlist, skb);
297 } 297 }
298 skb=nextsk; 298 skb = nextsk;
299 } 299 }
300 spin_unlock(&s->sk_receive_queue.lock); 300 spin_unlock(&s->sk_receive_queue.lock);
301 } 301 }
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 7fd872ad0c20..e20cfadad4d9 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
80 if (!skb_prev) 80 if (!skb_prev)
81 skb_queue_head(&sk->sk_write_queue, skb); 81 skb_queue_head(&sk->sk_write_queue, skb);
82 else 82 else
83 skb_append(skb_prev, skb); 83 skb_append(skb_prev, skb, &sk->sk_write_queue);
84 skb_prev = skb; 84 skb_prev = skb;
85 } 85 }
86} 86}