author	David S. Miller <davem@davemloft.net>	2005-08-09 22:25:21 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:31:14 -0400
commit	8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree	2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /drivers/atm
parent	6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely
redundant.  All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
taking up some space.

Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed up.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
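
For illustration, a minimal sketch of the replacement pattern (not taken from the patch; ex_skb_cb, EX_SKB_CB and ex_recycle are hypothetical names modelled on nicstar's NS_SKB_CB and buf_type in the diff below): the caller now passes the queue explicitly to skb_unlink(), and any pool identity that used to live in skb->list is kept in the skb control block instead.

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct ex_skb_cb {
    u32 buf_type;               /* mirrors nicstar's BUF_SM/BUF_LG/BUF_NONE */
};
#define EX_SKB_CB(skb)  ((struct ex_skb_cb *)((skb)->cb))

static void ex_recycle(struct sk_buff_head *pool, struct sk_buff *skb)
{
    /* skb->list is gone: the caller names the queue it unlinks from */
    skb_unlink(skb, pool);
    dev_kfree_skb_any(skb);
}
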
Diffstat (limited to 'drivers/atm')
-rw-r--r--	drivers/atm/nicstar.c	157
-rw-r--r--	drivers/atm/nicstar.h	16
-rw-r--r--	drivers/atm/zatm.c	8
3 files changed, 93 insertions(+), 88 deletions(-)
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..a0e3bd861f1c 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev *card, int error);
 static scq_info *get_scq(int size, u32 scd);
 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                        u32 handle2, u32 addr2);
+static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(hb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->hbpool.queue, hb);
       card->hbpool.count++;
    }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(lb)->buf_type = BUF_LG;
       skb_queue_tail(&card->lbpool.queue, lb);
       skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+      push_rxbufs(card, lb);
       /* Due to the implementation of push_rxbufs() this is 1, not 0 */
       if (j == 1)
       {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(sb)->buf_type = BUF_SM;
       skb_queue_tail(&card->sbpool.queue, sb);
       skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+      push_rxbufs(card, sb);
    }
    /* Test for strange behaviour which leads to crashes */
    if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
          return error;
       }
+      NS_SKB_CB(iovb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->iovpool.queue, iovb);
       card->iovpool.count++;
    }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
 
 /* The handles passed must be pointers to the sk_buff containing the small
    or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                        u32 handle2, u32 addr2)
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
+   struct ns_skb_cb *cb = NS_SKB_CB(skb);
+   u32 handle1, addr1;
+   u32 handle2, addr2;
    u32 stat;
    unsigned long flags;
 
+   /* *BARF* */
+   handle2 = addr2 = 0;
+   handle1 = (u32)skb;
+   addr1 = (u32)virt_to_bus(skb->data);
 
 #ifdef GENERAL_DEBUG
    if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
    stat = readl(card->membase + STAT);
    card->sbfqc = ns_stat_sfbqc_get(stat);
    card->lbfqc = ns_stat_lfbqc_get(stat);
-   if (type == BUF_SM)
+   if (cb->buf_type == BUF_SM)
    {
       if (!addr2)
       {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
          }
       }
    }
-   else /* type == BUF_LG */
+   else /* buf_type == BUF_LG */
    {
       if (!addr2)
       {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 
    if (addr2)
    {
-      if (type == BUF_SM)
+      if (cb->buf_type == BUF_SM)
       {
          if (card->sbfqc >= card->sbnr.max)
          {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle2);
             return;
          }
          else
             card->sbfqc += 2;
       }
-      else /* (type == BUF_LG) */
+      else /* (buf_type == BUF_LG) */
       {
          if (card->lbfqc >= card->lbnr.max)
          {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle2);
             return;
          }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
       writel(handle2, card->membase + DR2);
       writel(addr1, card->membase + DR1);
       writel(handle1, card->membase + DR0);
-      writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
+      writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
 
       spin_unlock_irqrestore(&card->res_lock, flags);
 
       XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-              (type == BUF_SM ? "small" : "large"), addr1, addr2);
+              (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
    }
 
    if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
             card->efbie = 0;
             break;
          }
+         NS_SKB_CB(sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, sb);
          skb_reserve(sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+         push_rxbufs(card, sb);
       }
       card->sbfqc = i;
       process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
             card->efbie = 0;
             break;
          }
+         NS_SKB_CB(lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, lb);
          skb_reserve(lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+         push_rxbufs(card, lb);
       }
       card->lbfqc = i;
       process_rsq(card);
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             recycle_rx_buf(card, skb);
             return;
          }
+         NS_SKB_CB(iovb)->buf_type = BUF_NONE;
       }
       else
       if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
          struct sk_buff *new_iovb;
          if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
          {
+            NS_SKB_CB(iovb)->buf_type = BUF_NONE;
             skb_queue_tail(&card->iovpool.queue, new_iovb);
             card->iovpool.count++;
          }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 
    if (NS_SKB(iovb)->iovcnt == 1)
    {
-      if (skb->list != &card->sbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_SM)
       {
          printk("nicstar%d: Expected a small buffer, and this is not one.\n",
                 card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
    }
    else /* NS_SKB(iovb)->iovcnt >= 2 */
    {
-      if (skb->list != &card->lbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_LG)
       {
          printk("nicstar%d: Expected a large buffer, and this is not one.\n",
                 card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       /* skb points to a small buffer */
       if (!atm_charge(vcc, skb->truesize))
       {
-         push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                     0, 0);
+         push_rxbufs(card, skb);
          atomic_inc(&vcc->stats->rx_drop);
       }
       else
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       {
          if (!atm_charge(vcc, sb->truesize))
          {
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                        0, 0);
+            push_rxbufs(card, sb);
             atomic_inc(&vcc->stats->rx_drop);
          }
          else
@@ -2367,16 +2378,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             atomic_inc(&vcc->stats->rx);
          }
 
-         push_rxbufs(card, BUF_LG, (u32) skb,
-                     (u32) virt_to_bus(skb->data), 0, 0);
+         push_rxbufs(card, skb);
 
       }
       else /* len > NS_SMBUFSIZE, the usual case */
       {
          if (!atm_charge(vcc, skb->truesize))
          {
-            push_rxbufs(card, BUF_LG, (u32) skb,
-                        (u32) virt_to_bus(skb->data), 0, 0);
+            push_rxbufs(card, skb);
             atomic_inc(&vcc->stats->rx_drop);
          }
          else
@@ -2394,8 +2403,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             atomic_inc(&vcc->stats->rx);
          }
 
-         push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                     0, 0);
+         push_rxbufs(card, sb);
 
       }
 
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                card->hbpool.count++;
             }
          }
+         NS_SKB_CB(hb)->buf_type = BUF_NONE;
       }
       else
       if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
          struct sk_buff *new_hb;
          if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
          {
+            NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
             skb_queue_tail(&card->hbpool.queue, new_hb);
             card->hbpool.count++;
          }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       {
          if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
          {
+            NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
             skb_queue_tail(&card->hbpool.queue, new_hb);
             card->hbpool.count++;
          }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
       remaining = len - iov->iov_len;
       iov++;
       /* Free the small buffer */
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                  0, 0);
+      push_rxbufs(card, sb);
 
       /* Copy all large buffers to the huge buffer and free them */
       for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
          skb_put(hb, tocopy);
          iov++;
          remaining -= tocopy;
-         push_rxbufs(card, BUF_LG, (u32) lb,
-                     (u32) virt_to_bus(lb->data), 0, 0);
+         push_rxbufs(card, lb);
       }
 #ifdef EXTRA_DEBUG
       if (remaining != 0 || hb->len != len)
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
       sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
       if (sb == NULL)
          break;
+      NS_SKB_CB(sb)->buf_type = BUF_SM;
       skb_queue_tail(&card->sbpool.queue, sb);
       skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+      push_rxbufs(card, sb);
    } while (card->sbfqc < card->sbnr.min);
 }
 
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
       lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
       if (lb == NULL)
          break;
+      NS_SKB_CB(lb)->buf_type = BUF_LG;
       skb_queue_tail(&card->lbpool.queue, lb);
       skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+      push_rxbufs(card, lb);
    } while (card->lbfqc < card->lbnr.min);
 }
 
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
       hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
       if (hb == NULL)
          break;
+      NS_SKB_CB(hb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->hbpool.queue, hb);
       card->hbpool.count++;
    }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
 #endif /* NS_USE_DESTRUCTORS */
 
 
-
 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-   if (skb->list == &card->sbpool.queue)
-      push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else if (skb->list == &card->lbpool.queue)
-      push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else
-   {
-      printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-      dev_kfree_skb_any(skb);
-   }
-}
+   struct ns_skb_cb *cb = NS_SKB_CB(skb);
 
+   if (unlikely(cb->buf_type == BUF_NONE)) {
+      printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+      dev_kfree_skb_any(skb);
+   } else
+      push_rxbufs(card, skb);
+}
 
 
 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
-   struct sk_buff *skb;
-
-   for (; count > 0; count--)
-   {
-      skb = (struct sk_buff *) (iov++)->iov_base;
-      if (skb->list == &card->sbpool.queue)
-         push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                     0, 0);
-      else if (skb->list == &card->lbpool.queue)
-         push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
-                     0, 0);
-      else
-      {
-         printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-         dev_kfree_skb_any(skb);
-      }
-   }
+   while (count-- > 0)
+      recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
 }
 
 
-
 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
    if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 
 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-   skb_unlink(sb);
+   skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->sbfqc < card->sbnr.min)
 #else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
       struct sk_buff *new_sb;
       if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, new_sb);
          skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) new_sb,
-                     (u32) virt_to_bus(new_sb->data), 0, 0);
+         push_rxbufs(card, new_sb);
       }
    }
    if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
       struct sk_buff *new_sb;
       if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, new_sb);
          skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) new_sb,
-                     (u32) virt_to_bus(new_sb->data), 0, 0);
+         push_rxbufs(card, new_sb);
       }
    }
 }
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 
 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 {
-   skb_unlink(lb);
+   skb_unlink(lb, &card->lbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->lbfqc < card->lbnr.min)
 #else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
       struct sk_buff *new_lb;
       if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, new_lb);
          skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) new_lb,
-                     (u32) virt_to_bus(new_lb->data), 0, 0);
+         push_rxbufs(card, new_lb);
       }
    }
    if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
       struct sk_buff *new_lb;
       if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, new_lb);
          skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) new_lb,
-                     (u32) virt_to_bus(new_lb->data), 0, 0);
+         push_rxbufs(card, new_lb);
       }
    }
 }
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
             sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
             if (sb == NULL)
                return -ENOMEM;
+            NS_SKB_CB(sb)->buf_type = BUF_SM;
             skb_queue_tail(&card->sbpool.queue, sb);
             skb_reserve(sb, NS_AAL0_HEADER);
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+            push_rxbufs(card, sb);
          }
          break;
 
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
             lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
             if (lb == NULL)
                return -ENOMEM;
+            NS_SKB_CB(lb)->buf_type = BUF_LG;
             skb_queue_tail(&card->lbpool.queue, lb);
             skb_reserve(lb, NS_SMBUFSIZE);
-            push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+            push_rxbufs(card, lb);
          }
          break;
 
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
             hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
             if (hb == NULL)
                return -ENOMEM;
+            NS_SKB_CB(hb)->buf_type = BUF_NONE;
             ns_grab_int_lock(card, flags);
             skb_queue_tail(&card->hbpool.queue, hb);
             card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
             iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
             if (iovb == NULL)
                return -ENOMEM;
+            NS_SKB_CB(iovb)->buf_type = BUF_NONE;
             ns_grab_int_lock(card, flags);
             skb_queue_tail(&card->iovpool.queue, iovb);
             card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 }
 
 
-
 static void which_list(ns_dev *card, struct sk_buff *skb)
 {
-   printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
-          "small" : skb->list == &card->lbpool.queue ? "large" :
-          skb->list == &card->hbpool.queue ? "huge" :
-          skb->list == &card->iovpool.queue ? "iovec" : "unknown");
+   printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
 }
 
 
-
 static void ns_poll(unsigned long arg)
 {
    int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -103,8 +103,14 @@
 
 #define NS_IOREMAP_SIZE 4096
 
-#define BUF_SM 0x00000000	/* These two are used for push_rxbufs() */
-#define BUF_LG 0x00000001	/* CMD, Write_FreeBufQ, LBUF bit */
+/*
+ * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
+ * BUG_SM and BUG_LG are both used by the driver and the device.
+ * BUF_NONE is only used by the driver.
+ */
+#define BUF_SM		0x00000000	/* These two are used for push_rxbufs() */
+#define BUF_LG		0x00000001	/* CMD, Write_FreeBufQ, LBUF bit */
+#define BUF_NONE	0xffffffff	/* Software only: */
 
 #define NS_HBUFSIZE 65568	/* Size of max. AAL5 PDU */
 #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
 /* Device driver structures ***************************************************/
 
 
+struct ns_skb_cb {
+	u32 buf_type;	/* BUF_SM/BUF_LG/BUF_NONE */
+};
+
+#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))
+
 typedef struct tsq_info
 {
    void *org;
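
A short usage sketch of the helper defined above (assumed caller context, not code from the patch: card is the nicstar device structure, and the constants are the ones in the hunks above). The buffer type is recorded when an skb is queued on a pool, and the tag replaces the old skb->list test:

    /* allocating and tagging a small receive buffer, as the driver does */
    struct sk_buff *sb = dev_alloc_skb(NS_SMSKBSIZE);

    if (sb != NULL) {
        NS_SKB_CB(sb)->buf_type = BUF_SM;   /* remember which pool */
        skb_queue_tail(&card->sbpool.queue, sb);
        skb_reserve(sb, NS_AAL0_HEADER);
        push_rxbufs(card, sb);              /* new single-skb form */
    }

    /* later, when recycling, the tag replaces the old skb->list check:
     *    if (NS_SKB_CB(skb)->buf_type == BUF_SM) ...
     */
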
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..85fee9530fa9 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
 		chan = (here[3] & uPD98401_AAL5_CHAN) >>
 		    uPD98401_AAL5_CHAN_SHIFT;
 		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
+			int pos = ZATM_VCC(vcc)->pool;
+
 			vcc = zatm_dev->rx_map[chan];
-			if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool])
-				zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL;
-			skb_unlink(skb);
+			if (skb == zatm_dev->last_free[pos])
+				zatm_dev->last_free[pos] = NULL;
+			skb_unlink(skb, zatm_dev->pool + pos);
 		}
 		else {
 			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "