| field | value | date |
|---|---|---|
| author | David S. Miller <davem@davemloft.net> | 2005-08-09 22:25:21 -0400 |
| committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:31:14 -0400 |
| commit | 8728b834b226ffcf2c94a58530090e292af2a7bf | |
| tree | 2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /drivers | |
| parent | 6869c4d8e066e21623c812c448a05f1ed931c9c6 | |
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely
redundant. All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
taking up some space.
Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed
up.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
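The conversion this requires in drivers is mechanical: every skb_unlink()/__skb_unlink() caller now names the queue it is removing the skb from, since sk_buff no longer records it, and any driver that used skb->list as a hint must keep that hint elsewhere. A minimal sketch of the caller-side pattern (my_dev and drop_pending are hypothetical, not from this patch):

```c
#include <linux/skbuff.h>

/* Hypothetical driver state: a per-device receive queue. */
struct my_dev {
	struct sk_buff_head rxq;
};

static void drop_pending(struct my_dev *dev, struct sk_buff *skb)
{
	/* Before this patch: skb_unlink(skb), which read skb->list.
	 * After it, the caller must name the queue explicitly. */
	skb_unlink(skb, &dev->rxq);
	dev_kfree_skb_any(skb);
}
```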
Diffstat (limited to 'drivers')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/atm/nicstar.c | 157 |
| -rw-r--r-- | drivers/atm/nicstar.h | 16 |
| -rw-r--r-- | drivers/atm/zatm.c | 8 |
| -rw-r--r-- | drivers/bluetooth/bfusb.c | 8 |
| -rw-r--r-- | drivers/ieee1394/ieee1394_core.c | 4 |
| -rw-r--r-- | drivers/isdn/act2000/capi.c | 2 |
| -rw-r--r-- | drivers/net/shaper.c | 50 |
| -rw-r--r-- | drivers/net/wan/sdla_fr.c | 22 |
| -rw-r--r-- | drivers/usb/net/usbnet.c | 21 |
9 files changed, 119 insertions, 169 deletions
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd14..a0e3bd861f1c 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
| @@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev); | |||
| 214 | static void __devinit ns_init_card_error(ns_dev *card, int error); | 214 | static void __devinit ns_init_card_error(ns_dev *card, int error); |
| 215 | static scq_info *get_scq(int size, u32 scd); | 215 | static scq_info *get_scq(int size, u32 scd); |
| 216 | static void free_scq(scq_info *scq, struct atm_vcc *vcc); | 216 | static void free_scq(scq_info *scq, struct atm_vcc *vcc); |
| 217 | static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | 217 | static void push_rxbufs(ns_dev *, struct sk_buff *); |
| 218 | u32 handle2, u32 addr2); | ||
| 219 | static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs); | 218 | static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs); |
| 220 | static int ns_open(struct atm_vcc *vcc); | 219 | static int ns_open(struct atm_vcc *vcc); |
| 221 | static void ns_close(struct atm_vcc *vcc); | 220 | static void ns_close(struct atm_vcc *vcc); |
| @@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | |||
| 766 | ns_init_card_error(card, error); | 765 | ns_init_card_error(card, error); |
| 767 | return error; | 766 | return error; |
| 768 | } | 767 | } |
| 768 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | ||
| 769 | skb_queue_tail(&card->hbpool.queue, hb); | 769 | skb_queue_tail(&card->hbpool.queue, hb); |
| 770 | card->hbpool.count++; | 770 | card->hbpool.count++; |
| 771 | } | 771 | } |
| @@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | |||
| 786 | ns_init_card_error(card, error); | 786 | ns_init_card_error(card, error); |
| 787 | return error; | 787 | return error; |
| 788 | } | 788 | } |
| 789 | NS_SKB_CB(lb)->buf_type = BUF_LG; | ||
| 789 | skb_queue_tail(&card->lbpool.queue, lb); | 790 | skb_queue_tail(&card->lbpool.queue, lb); |
| 790 | skb_reserve(lb, NS_SMBUFSIZE); | 791 | skb_reserve(lb, NS_SMBUFSIZE); |
| 791 | push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); | 792 | push_rxbufs(card, lb); |
| 792 | /* Due to the implementation of push_rxbufs() this is 1, not 0 */ | 793 | /* Due to the implementation of push_rxbufs() this is 1, not 0 */ |
| 793 | if (j == 1) | 794 | if (j == 1) |
| 794 | { | 795 | { |
| @@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | |||
| 822 | ns_init_card_error(card, error); | 823 | ns_init_card_error(card, error); |
| 823 | return error; | 824 | return error; |
| 824 | } | 825 | } |
| 826 | NS_SKB_CB(sb)->buf_type = BUF_SM; | ||
| 825 | skb_queue_tail(&card->sbpool.queue, sb); | 827 | skb_queue_tail(&card->sbpool.queue, sb); |
| 826 | skb_reserve(sb, NS_AAL0_HEADER); | 828 | skb_reserve(sb, NS_AAL0_HEADER); |
| 827 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); | 829 | push_rxbufs(card, sb); |
| 828 | } | 830 | } |
| 829 | /* Test for strange behaviour which leads to crashes */ | 831 | /* Test for strange behaviour which leads to crashes */ |
| 830 | if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) | 832 | if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) |
| @@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | |||
| 852 | ns_init_card_error(card, error); | 854 | ns_init_card_error(card, error); |
| 853 | return error; | 855 | return error; |
| 854 | } | 856 | } |
| 857 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
| 855 | skb_queue_tail(&card->iovpool.queue, iovb); | 858 | skb_queue_tail(&card->iovpool.queue, iovb); |
| 856 | card->iovpool.count++; | 859 | card->iovpool.count++; |
| 857 | } | 860 | } |
| @@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc) | |||
| 1078 | 1081 | ||
| 1079 | /* The handles passed must be pointers to the sk_buff containing the small | 1082 | /* The handles passed must be pointers to the sk_buff containing the small |
| 1080 | or large buffer(s) cast to u32. */ | 1083 | or large buffer(s) cast to u32. */ |
| 1081 | static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | 1084 | static void push_rxbufs(ns_dev *card, struct sk_buff *skb) |
| 1082 | u32 handle2, u32 addr2) | ||
| 1083 | { | 1085 | { |
| 1086 | struct ns_skb_cb *cb = NS_SKB_CB(skb); | ||
| 1087 | u32 handle1, addr1; | ||
| 1088 | u32 handle2, addr2; | ||
| 1084 | u32 stat; | 1089 | u32 stat; |
| 1085 | unsigned long flags; | 1090 | unsigned long flags; |
| 1086 | 1091 | ||
| 1092 | /* *BARF* */ | ||
| 1093 | handle2 = addr2 = 0; | ||
| 1094 | handle1 = (u32)skb; | ||
| 1095 | addr1 = (u32)virt_to_bus(skb->data); | ||
| 1087 | 1096 | ||
| 1088 | #ifdef GENERAL_DEBUG | 1097 | #ifdef GENERAL_DEBUG |
| 1089 | if (!addr1) | 1098 | if (!addr1) |
| @@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | |||
| 1093 | stat = readl(card->membase + STAT); | 1102 | stat = readl(card->membase + STAT); |
| 1094 | card->sbfqc = ns_stat_sfbqc_get(stat); | 1103 | card->sbfqc = ns_stat_sfbqc_get(stat); |
| 1095 | card->lbfqc = ns_stat_lfbqc_get(stat); | 1104 | card->lbfqc = ns_stat_lfbqc_get(stat); |
| 1096 | if (type == BUF_SM) | 1105 | if (cb->buf_type == BUF_SM) |
| 1097 | { | 1106 | { |
| 1098 | if (!addr2) | 1107 | if (!addr2) |
| 1099 | { | 1108 | { |
| @@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | |||
| 1111 | } | 1120 | } |
| 1112 | } | 1121 | } |
| 1113 | } | 1122 | } |
| 1114 | else /* type == BUF_LG */ | 1123 | else /* buf_type == BUF_LG */ |
| 1115 | { | 1124 | { |
| 1116 | if (!addr2) | 1125 | if (!addr2) |
| 1117 | { | 1126 | { |
| @@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | |||
| 1132 | 1141 | ||
| 1133 | if (addr2) | 1142 | if (addr2) |
| 1134 | { | 1143 | { |
| 1135 | if (type == BUF_SM) | 1144 | if (cb->buf_type == BUF_SM) |
| 1136 | { | 1145 | { |
| 1137 | if (card->sbfqc >= card->sbnr.max) | 1146 | if (card->sbfqc >= card->sbnr.max) |
| 1138 | { | 1147 | { |
| 1139 | skb_unlink((struct sk_buff *) handle1); | 1148 | skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); |
| 1140 | dev_kfree_skb_any((struct sk_buff *) handle1); | 1149 | dev_kfree_skb_any((struct sk_buff *) handle1); |
| 1141 | skb_unlink((struct sk_buff *) handle2); | 1150 | skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); |
| 1142 | dev_kfree_skb_any((struct sk_buff *) handle2); | 1151 | dev_kfree_skb_any((struct sk_buff *) handle2); |
| 1143 | return; | 1152 | return; |
| 1144 | } | 1153 | } |
| 1145 | else | 1154 | else |
| 1146 | card->sbfqc += 2; | 1155 | card->sbfqc += 2; |
| 1147 | } | 1156 | } |
| 1148 | else /* (type == BUF_LG) */ | 1157 | else /* (buf_type == BUF_LG) */ |
| 1149 | { | 1158 | { |
| 1150 | if (card->lbfqc >= card->lbnr.max) | 1159 | if (card->lbfqc >= card->lbnr.max) |
| 1151 | { | 1160 | { |
| 1152 | skb_unlink((struct sk_buff *) handle1); | 1161 | skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); |
| 1153 | dev_kfree_skb_any((struct sk_buff *) handle1); | 1162 | dev_kfree_skb_any((struct sk_buff *) handle1); |
| 1154 | skb_unlink((struct sk_buff *) handle2); | 1163 | skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); |
| 1155 | dev_kfree_skb_any((struct sk_buff *) handle2); | 1164 | dev_kfree_skb_any((struct sk_buff *) handle2); |
| 1156 | return; | 1165 | return; |
| 1157 | } | 1166 | } |
| @@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1, | |||
| 1166 | writel(handle2, card->membase + DR2); | 1175 | writel(handle2, card->membase + DR2); |
| 1167 | writel(addr1, card->membase + DR1); | 1176 | writel(addr1, card->membase + DR1); |
| 1168 | writel(handle1, card->membase + DR0); | 1177 | writel(handle1, card->membase + DR0); |
| 1169 | writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD); | 1178 | writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); |
| 1170 | 1179 | ||
| 1171 | spin_unlock_irqrestore(&card->res_lock, flags); | 1180 | spin_unlock_irqrestore(&card->res_lock, flags); |
| 1172 | 1181 | ||
| 1173 | XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, | 1182 | XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, |
| 1174 | (type == BUF_SM ? "small" : "large"), addr1, addr2); | 1183 | (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2); |
| 1175 | } | 1184 | } |
| 1176 | 1185 | ||
| 1177 | if (!card->efbie && card->sbfqc >= card->sbnr.min && | 1186 | if (!card->efbie && card->sbfqc >= card->sbnr.min && |
| @@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs) | |||
| 1322 | card->efbie = 0; | 1331 | card->efbie = 0; |
| 1323 | break; | 1332 | break; |
| 1324 | } | 1333 | } |
| 1334 | NS_SKB_CB(sb)->buf_type = BUF_SM; | ||
| 1325 | skb_queue_tail(&card->sbpool.queue, sb); | 1335 | skb_queue_tail(&card->sbpool.queue, sb); |
| 1326 | skb_reserve(sb, NS_AAL0_HEADER); | 1336 | skb_reserve(sb, NS_AAL0_HEADER); |
| 1327 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); | 1337 | push_rxbufs(card, sb); |
| 1328 | } | 1338 | } |
| 1329 | card->sbfqc = i; | 1339 | card->sbfqc = i; |
| 1330 | process_rsq(card); | 1340 | process_rsq(card); |
| @@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs) | |||
| 1348 | card->efbie = 0; | 1358 | card->efbie = 0; |
| 1349 | break; | 1359 | break; |
| 1350 | } | 1360 | } |
| 1361 | NS_SKB_CB(lb)->buf_type = BUF_LG; | ||
| 1351 | skb_queue_tail(&card->lbpool.queue, lb); | 1362 | skb_queue_tail(&card->lbpool.queue, lb); |
| 1352 | skb_reserve(lb, NS_SMBUFSIZE); | 1363 | skb_reserve(lb, NS_SMBUFSIZE); |
| 1353 | push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); | 1364 | push_rxbufs(card, lb); |
| 1354 | } | 1365 | } |
| 1355 | card->lbfqc = i; | 1366 | card->lbfqc = i; |
| 1356 | process_rsq(card); | 1367 | process_rsq(card); |
| @@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2227 | recycle_rx_buf(card, skb); | 2238 | recycle_rx_buf(card, skb); |
| 2228 | return; | 2239 | return; |
| 2229 | } | 2240 | } |
| 2241 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
| 2230 | } | 2242 | } |
| 2231 | else | 2243 | else |
| 2232 | if (--card->iovpool.count < card->iovnr.min) | 2244 | if (--card->iovpool.count < card->iovnr.min) |
| @@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2234 | struct sk_buff *new_iovb; | 2246 | struct sk_buff *new_iovb; |
| 2235 | if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) | 2247 | if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) |
| 2236 | { | 2248 | { |
| 2249 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
| 2237 | skb_queue_tail(&card->iovpool.queue, new_iovb); | 2250 | skb_queue_tail(&card->iovpool.queue, new_iovb); |
| 2238 | card->iovpool.count++; | 2251 | card->iovpool.count++; |
| 2239 | } | 2252 | } |
| @@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2264 | 2277 | ||
| 2265 | if (NS_SKB(iovb)->iovcnt == 1) | 2278 | if (NS_SKB(iovb)->iovcnt == 1) |
| 2266 | { | 2279 | { |
| 2267 | if (skb->list != &card->sbpool.queue) | 2280 | if (NS_SKB_CB(skb)->buf_type != BUF_SM) |
| 2268 | { | 2281 | { |
| 2269 | printk("nicstar%d: Expected a small buffer, and this is not one.\n", | 2282 | printk("nicstar%d: Expected a small buffer, and this is not one.\n", |
| 2270 | card->index); | 2283 | card->index); |
| @@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2278 | } | 2291 | } |
| 2279 | else /* NS_SKB(iovb)->iovcnt >= 2 */ | 2292 | else /* NS_SKB(iovb)->iovcnt >= 2 */ |
| 2280 | { | 2293 | { |
| 2281 | if (skb->list != &card->lbpool.queue) | 2294 | if (NS_SKB_CB(skb)->buf_type != BUF_LG) |
| 2282 | { | 2295 | { |
| 2283 | printk("nicstar%d: Expected a large buffer, and this is not one.\n", | 2296 | printk("nicstar%d: Expected a large buffer, and this is not one.\n", |
| 2284 | card->index); | 2297 | card->index); |
| @@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2322 | /* skb points to a small buffer */ | 2335 | /* skb points to a small buffer */ |
| 2323 | if (!atm_charge(vcc, skb->truesize)) | 2336 | if (!atm_charge(vcc, skb->truesize)) |
| 2324 | { | 2337 | { |
| 2325 | push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), | 2338 | push_rxbufs(card, skb); |
| 2326 | 0, 0); | ||
| 2327 | atomic_inc(&vcc->stats->rx_drop); | 2339 | atomic_inc(&vcc->stats->rx_drop); |
| 2328 | } | 2340 | } |
| 2329 | else | 2341 | else |
| @@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2350 | { | 2362 | { |
| 2351 | if (!atm_charge(vcc, sb->truesize)) | 2363 | if (!atm_charge(vcc, sb->truesize)) |
| 2352 | { | 2364 | { |
| 2353 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), | 2365 | push_rxbufs(card, sb); |
| 2354 | 0, 0); | ||
| 2355 | atomic_inc(&vcc->stats->rx_drop); | 2366 | atomic_inc(&vcc->stats->rx_drop); |
| 2356 | } | 2367 | } |
| 2357 | else | 2368 | else |
| @@ -2367,16 +2378,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2367 | atomic_inc(&vcc->stats->rx); | 2378 | atomic_inc(&vcc->stats->rx); |
| 2368 | } | 2379 | } |
| 2369 | 2380 | ||
| 2370 | push_rxbufs(card, BUF_LG, (u32) skb, | 2381 | push_rxbufs(card, skb); |
| 2371 | (u32) virt_to_bus(skb->data), 0, 0); | ||
| 2372 | 2382 | ||
| 2373 | } | 2383 | } |
| 2374 | else /* len > NS_SMBUFSIZE, the usual case */ | 2384 | else /* len > NS_SMBUFSIZE, the usual case */ |
| 2375 | { | 2385 | { |
| 2376 | if (!atm_charge(vcc, skb->truesize)) | 2386 | if (!atm_charge(vcc, skb->truesize)) |
| 2377 | { | 2387 | { |
| 2378 | push_rxbufs(card, BUF_LG, (u32) skb, | 2388 | push_rxbufs(card, skb); |
| 2379 | (u32) virt_to_bus(skb->data), 0, 0); | ||
| 2380 | atomic_inc(&vcc->stats->rx_drop); | 2389 | atomic_inc(&vcc->stats->rx_drop); |
| 2381 | } | 2390 | } |
| 2382 | else | 2391 | else |
| @@ -2394,8 +2403,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2394 | atomic_inc(&vcc->stats->rx); | 2403 | atomic_inc(&vcc->stats->rx); |
| 2395 | } | 2404 | } |
| 2396 | 2405 | ||
| 2397 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), | 2406 | push_rxbufs(card, sb); |
| 2398 | 0, 0); | ||
| 2399 | 2407 | ||
| 2400 | } | 2408 | } |
| 2401 | 2409 | ||
| @@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2430 | card->hbpool.count++; | 2438 | card->hbpool.count++; |
| 2431 | } | 2439 | } |
| 2432 | } | 2440 | } |
| 2441 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | ||
| 2433 | } | 2442 | } |
| 2434 | else | 2443 | else |
| 2435 | if (--card->hbpool.count < card->hbnr.min) | 2444 | if (--card->hbpool.count < card->hbnr.min) |
| @@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2437 | struct sk_buff *new_hb; | 2446 | struct sk_buff *new_hb; |
| 2438 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | 2447 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) |
| 2439 | { | 2448 | { |
| 2449 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; | ||
| 2440 | skb_queue_tail(&card->hbpool.queue, new_hb); | 2450 | skb_queue_tail(&card->hbpool.queue, new_hb); |
| 2441 | card->hbpool.count++; | 2451 | card->hbpool.count++; |
| 2442 | } | 2452 | } |
| @@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2444 | { | 2454 | { |
| 2445 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | 2455 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) |
| 2446 | { | 2456 | { |
| 2457 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; | ||
| 2447 | skb_queue_tail(&card->hbpool.queue, new_hb); | 2458 | skb_queue_tail(&card->hbpool.queue, new_hb); |
| 2448 | card->hbpool.count++; | 2459 | card->hbpool.count++; |
| 2449 | } | 2460 | } |
| @@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2473 | remaining = len - iov->iov_len; | 2484 | remaining = len - iov->iov_len; |
| 2474 | iov++; | 2485 | iov++; |
| 2475 | /* Free the small buffer */ | 2486 | /* Free the small buffer */ |
| 2476 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), | 2487 | push_rxbufs(card, sb); |
| 2477 | 0, 0); | ||
| 2478 | 2488 | ||
| 2479 | /* Copy all large buffers to the huge buffer and free them */ | 2489 | /* Copy all large buffers to the huge buffer and free them */ |
| 2480 | for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) | 2490 | for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) |
| @@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |||
| 2485 | skb_put(hb, tocopy); | 2495 | skb_put(hb, tocopy); |
| 2486 | iov++; | 2496 | iov++; |
| 2487 | remaining -= tocopy; | 2497 | remaining -= tocopy; |
| 2488 | push_rxbufs(card, BUF_LG, (u32) lb, | 2498 | push_rxbufs(card, lb); |
| 2489 | (u32) virt_to_bus(lb->data), 0, 0); | ||
| 2490 | } | 2499 | } |
| 2491 | #ifdef EXTRA_DEBUG | 2500 | #ifdef EXTRA_DEBUG |
| 2492 | if (remaining != 0 || hb->len != len) | 2501 | if (remaining != 0 || hb->len != len) |
| @@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb) | |||
| 2527 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 2536 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); |
| 2528 | if (sb == NULL) | 2537 | if (sb == NULL) |
| 2529 | break; | 2538 | break; |
| 2539 | NS_SKB_CB(sb)->buf_type = BUF_SM; | ||
| 2530 | skb_queue_tail(&card->sbpool.queue, sb); | 2540 | skb_queue_tail(&card->sbpool.queue, sb); |
| 2531 | skb_reserve(sb, NS_AAL0_HEADER); | 2541 | skb_reserve(sb, NS_AAL0_HEADER); |
| 2532 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); | 2542 | push_rxbufs(card, sb); |
| 2533 | } while (card->sbfqc < card->sbnr.min); | 2543 | } while (card->sbfqc < card->sbnr.min); |
| 2534 | } | 2544 | } |
| 2535 | 2545 | ||
| @@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb) | |||
| 2550 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 2560 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); |
| 2551 | if (lb == NULL) | 2561 | if (lb == NULL) |
| 2552 | break; | 2562 | break; |
| 2563 | NS_SKB_CB(lb)->buf_type = BUF_LG; | ||
| 2553 | skb_queue_tail(&card->lbpool.queue, lb); | 2564 | skb_queue_tail(&card->lbpool.queue, lb); |
| 2554 | skb_reserve(lb, NS_SMBUFSIZE); | 2565 | skb_reserve(lb, NS_SMBUFSIZE); |
| 2555 | push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); | 2566 | push_rxbufs(card, lb); |
| 2556 | } while (card->lbfqc < card->lbnr.min); | 2567 | } while (card->lbfqc < card->lbnr.min); |
| 2557 | } | 2568 | } |
| 2558 | 2569 | ||
| @@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb) | |||
| 2569 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 2580 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); |
| 2570 | if (hb == NULL) | 2581 | if (hb == NULL) |
| 2571 | break; | 2582 | break; |
| 2583 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | ||
| 2572 | skb_queue_tail(&card->hbpool.queue, hb); | 2584 | skb_queue_tail(&card->hbpool.queue, hb); |
| 2573 | card->hbpool.count++; | 2585 | card->hbpool.count++; |
| 2574 | } | 2586 | } |
| @@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb) | |||
| 2577 | #endif /* NS_USE_DESTRUCTORS */ | 2589 | #endif /* NS_USE_DESTRUCTORS */ |
| 2578 | 2590 | ||
| 2579 | 2591 | ||
| 2580 | |||
| 2581 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) | 2592 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) |
| 2582 | { | 2593 | { |
| 2583 | if (skb->list == &card->sbpool.queue) | 2594 | struct ns_skb_cb *cb = NS_SKB_CB(skb); |
| 2584 | push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0); | ||
| 2585 | else if (skb->list == &card->lbpool.queue) | ||
| 2586 | push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0); | ||
| 2587 | else | ||
| 2588 | { | ||
| 2589 | printk("nicstar%d: What kind of rx buffer is this?\n", card->index); | ||
| 2590 | dev_kfree_skb_any(skb); | ||
| 2591 | } | ||
| 2592 | } | ||
| 2593 | 2595 | ||
| 2596 | if (unlikely(cb->buf_type == BUF_NONE)) { | ||
| 2597 | printk("nicstar%d: What kind of rx buffer is this?\n", card->index); | ||
| 2598 | dev_kfree_skb_any(skb); | ||
| 2599 | } else | ||
| 2600 | push_rxbufs(card, skb); | ||
| 2601 | } | ||
| 2594 | 2602 | ||
| 2595 | 2603 | ||
| 2596 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) | 2604 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) |
| 2597 | { | 2605 | { |
| 2598 | struct sk_buff *skb; | 2606 | while (count-- > 0) |
| 2599 | 2607 | recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); | |
| 2600 | for (; count > 0; count--) | ||
| 2601 | { | ||
| 2602 | skb = (struct sk_buff *) (iov++)->iov_base; | ||
| 2603 | if (skb->list == &card->sbpool.queue) | ||
| 2604 | push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), | ||
| 2605 | 0, 0); | ||
| 2606 | else if (skb->list == &card->lbpool.queue) | ||
| 2607 | push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), | ||
| 2608 | 0, 0); | ||
| 2609 | else | ||
| 2610 | { | ||
| 2611 | printk("nicstar%d: What kind of rx buffer is this?\n", card->index); | ||
| 2612 | dev_kfree_skb_any(skb); | ||
| 2613 | } | ||
| 2614 | } | ||
| 2615 | } | 2608 | } |
| 2616 | 2609 | ||
| 2617 | 2610 | ||
| 2618 | |||
| 2619 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) | 2611 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) |
| 2620 | { | 2612 | { |
| 2621 | if (card->iovpool.count < card->iovnr.max) | 2613 | if (card->iovpool.count < card->iovnr.max) |
| @@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) | |||
| 2631 | 2623 | ||
| 2632 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | 2624 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) |
| 2633 | { | 2625 | { |
| 2634 | skb_unlink(sb); | 2626 | skb_unlink(sb, &card->sbpool.queue); |
| 2635 | #ifdef NS_USE_DESTRUCTORS | 2627 | #ifdef NS_USE_DESTRUCTORS |
| 2636 | if (card->sbfqc < card->sbnr.min) | 2628 | if (card->sbfqc < card->sbnr.min) |
| 2637 | #else | 2629 | #else |
| @@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | |||
| 2640 | struct sk_buff *new_sb; | 2632 | struct sk_buff *new_sb; |
| 2641 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | 2633 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) |
| 2642 | { | 2634 | { |
| 2635 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; | ||
| 2643 | skb_queue_tail(&card->sbpool.queue, new_sb); | 2636 | skb_queue_tail(&card->sbpool.queue, new_sb); |
| 2644 | skb_reserve(new_sb, NS_AAL0_HEADER); | 2637 | skb_reserve(new_sb, NS_AAL0_HEADER); |
| 2645 | push_rxbufs(card, BUF_SM, (u32) new_sb, | 2638 | push_rxbufs(card, new_sb); |
| 2646 | (u32) virt_to_bus(new_sb->data), 0, 0); | ||
| 2647 | } | 2639 | } |
| 2648 | } | 2640 | } |
| 2649 | if (card->sbfqc < card->sbnr.init) | 2641 | if (card->sbfqc < card->sbnr.init) |
| @@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | |||
| 2652 | struct sk_buff *new_sb; | 2644 | struct sk_buff *new_sb; |
| 2653 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | 2645 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) |
| 2654 | { | 2646 | { |
| 2647 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; | ||
| 2655 | skb_queue_tail(&card->sbpool.queue, new_sb); | 2648 | skb_queue_tail(&card->sbpool.queue, new_sb); |
| 2656 | skb_reserve(new_sb, NS_AAL0_HEADER); | 2649 | skb_reserve(new_sb, NS_AAL0_HEADER); |
| 2657 | push_rxbufs(card, BUF_SM, (u32) new_sb, | 2650 | push_rxbufs(card, new_sb); |
| 2658 | (u32) virt_to_bus(new_sb->data), 0, 0); | ||
| 2659 | } | 2651 | } |
| 2660 | } | 2652 | } |
| 2661 | } | 2653 | } |
| @@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | |||
| 2664 | 2656 | ||
| 2665 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) | 2657 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) |
| 2666 | { | 2658 | { |
| 2667 | skb_unlink(lb); | 2659 | skb_unlink(lb, &card->lbpool.queue); |
| 2668 | #ifdef NS_USE_DESTRUCTORS | 2660 | #ifdef NS_USE_DESTRUCTORS |
| 2669 | if (card->lbfqc < card->lbnr.min) | 2661 | if (card->lbfqc < card->lbnr.min) |
| 2670 | #else | 2662 | #else |
| @@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) | |||
| 2673 | struct sk_buff *new_lb; | 2665 | struct sk_buff *new_lb; |
| 2674 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | 2666 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) |
| 2675 | { | 2667 | { |
| 2668 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; | ||
| 2676 | skb_queue_tail(&card->lbpool.queue, new_lb); | 2669 | skb_queue_tail(&card->lbpool.queue, new_lb); |
| 2677 | skb_reserve(new_lb, NS_SMBUFSIZE); | 2670 | skb_reserve(new_lb, NS_SMBUFSIZE); |
| 2678 | push_rxbufs(card, BUF_LG, (u32) new_lb, | 2671 | push_rxbufs(card, new_lb); |
| 2679 | (u32) virt_to_bus(new_lb->data), 0, 0); | ||
| 2680 | } | 2672 | } |
| 2681 | } | 2673 | } |
| 2682 | if (card->lbfqc < card->lbnr.init) | 2674 | if (card->lbfqc < card->lbnr.init) |
| @@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) | |||
| 2685 | struct sk_buff *new_lb; | 2677 | struct sk_buff *new_lb; |
| 2686 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | 2678 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) |
| 2687 | { | 2679 | { |
| 2680 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; | ||
| 2688 | skb_queue_tail(&card->lbpool.queue, new_lb); | 2681 | skb_queue_tail(&card->lbpool.queue, new_lb); |
| 2689 | skb_reserve(new_lb, NS_SMBUFSIZE); | 2682 | skb_reserve(new_lb, NS_SMBUFSIZE); |
| 2690 | push_rxbufs(card, BUF_LG, (u32) new_lb, | 2683 | push_rxbufs(card, new_lb); |
| 2691 | (u32) virt_to_bus(new_lb->data), 0, 0); | ||
| 2692 | } | 2684 | } |
| 2693 | } | 2685 | } |
| 2694 | } | 2686 | } |
| @@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
| 2880 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 2872 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); |
| 2881 | if (sb == NULL) | 2873 | if (sb == NULL) |
| 2882 | return -ENOMEM; | 2874 | return -ENOMEM; |
| 2875 | NS_SKB_CB(sb)->buf_type = BUF_SM; | ||
| 2883 | skb_queue_tail(&card->sbpool.queue, sb); | 2876 | skb_queue_tail(&card->sbpool.queue, sb); |
| 2884 | skb_reserve(sb, NS_AAL0_HEADER); | 2877 | skb_reserve(sb, NS_AAL0_HEADER); |
| 2885 | push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0); | 2878 | push_rxbufs(card, sb); |
| 2886 | } | 2879 | } |
| 2887 | break; | 2880 | break; |
| 2888 | 2881 | ||
| @@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
| 2894 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 2887 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); |
| 2895 | if (lb == NULL) | 2888 | if (lb == NULL) |
| 2896 | return -ENOMEM; | 2889 | return -ENOMEM; |
| 2890 | NS_SKB_CB(lb)->buf_type = BUF_LG; | ||
| 2897 | skb_queue_tail(&card->lbpool.queue, lb); | 2891 | skb_queue_tail(&card->lbpool.queue, lb); |
| 2898 | skb_reserve(lb, NS_SMBUFSIZE); | 2892 | skb_reserve(lb, NS_SMBUFSIZE); |
| 2899 | push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0); | 2893 | push_rxbufs(card, lb); |
| 2900 | } | 2894 | } |
| 2901 | break; | 2895 | break; |
| 2902 | 2896 | ||
| @@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
| 2923 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 2917 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); |
| 2924 | if (hb == NULL) | 2918 | if (hb == NULL) |
| 2925 | return -ENOMEM; | 2919 | return -ENOMEM; |
| 2920 | NS_SKB_CB(hb)->buf_type = BUF_NONE; | ||
| 2926 | ns_grab_int_lock(card, flags); | 2921 | ns_grab_int_lock(card, flags); |
| 2927 | skb_queue_tail(&card->hbpool.queue, hb); | 2922 | skb_queue_tail(&card->hbpool.queue, hb); |
| 2928 | card->hbpool.count++; | 2923 | card->hbpool.count++; |
| @@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
| 2953 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | 2948 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); |
| 2954 | if (iovb == NULL) | 2949 | if (iovb == NULL) |
| 2955 | return -ENOMEM; | 2950 | return -ENOMEM; |
| 2951 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; | ||
| 2956 | ns_grab_int_lock(card, flags); | 2952 | ns_grab_int_lock(card, flags); |
| 2957 | skb_queue_tail(&card->iovpool.queue, iovb); | 2953 | skb_queue_tail(&card->iovpool.queue, iovb); |
| 2958 | card->iovpool.count++; | 2954 | card->iovpool.count++; |
| @@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
| 2979 | } | 2975 | } |
| 2980 | 2976 | ||
| 2981 | 2977 | ||
| 2982 | |||
| 2983 | static void which_list(ns_dev *card, struct sk_buff *skb) | 2978 | static void which_list(ns_dev *card, struct sk_buff *skb) |
| 2984 | { | 2979 | { |
| 2985 | printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ? | 2980 | printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); |
| 2986 | "small" : skb->list == &card->lbpool.queue ? "large" : | ||
| 2987 | skb->list == &card->hbpool.queue ? "huge" : | ||
| 2988 | skb->list == &card->iovpool.queue ? "iovec" : "unknown"); | ||
| 2989 | } | 2981 | } |
| 2990 | 2982 | ||
| 2991 | 2983 | ||
| 2992 | |||
| 2993 | static void ns_poll(unsigned long arg) | 2984 | static void ns_poll(unsigned long arg) |
| 2994 | { | 2985 | { |
| 2995 | int i; | 2986 | int i; |
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba5..5997bcb45b59 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
| @@ -103,8 +103,14 @@ | |||
| 103 | 103 | ||
| 104 | #define NS_IOREMAP_SIZE 4096 | 104 | #define NS_IOREMAP_SIZE 4096 |
| 105 | 105 | ||
| 106 | #define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */ | 106 | /* |
| 107 | #define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */ | 107 | * BUF_XX distinguish the Rx buffers depending on their (small/large) size. |
| 108 | * BUF_SM and BUF_LG are both used by the driver and the device. | ||
| 109 | * BUF_NONE is only used by the driver. | ||
| 110 | */ | ||
| 111 | #define BUF_SM 0x00000000 /* These two are used for push_rxbufs() */ | ||
| 112 | #define BUF_LG 0x00000001 /* CMD, Write_FreeBufQ, LBUF bit */ | ||
| 113 | #define BUF_NONE 0xffffffff /* Software only: */ | ||
| 108 | 114 | ||
| 109 | #define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */ | 115 | #define NS_HBUFSIZE 65568 /* Size of max. AAL5 PDU */ |
| 110 | #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \ | 116 | #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \ |
| @@ -684,6 +690,12 @@ enum ns_regs | |||
| 684 | /* Device driver structures ***************************************************/ | 690 | /* Device driver structures ***************************************************/ |
| 685 | 691 | ||
| 686 | 692 | ||
| 693 | struct ns_skb_cb { | ||
| 694 | u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */ | ||
| 695 | }; | ||
| 696 | |||
| 697 | #define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb)) | ||
| 698 | |||
| 687 | typedef struct tsq_info | 699 | typedef struct tsq_info |
| 688 | { | 700 | { |
| 689 | void *org; | 701 | void *org; |
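In nicstar the list pointer also doubled as a type tag (skb->list == &card->sbpool.queue meant "small buffer"), so the patch moves that tag into the skb control block defined in the hunk above. A short sketch of the pattern, with the BUF_* values and NS_SKB_CB repeated from nicstar.h and the two helper functions purely illustrative:

```c
#include <linux/skbuff.h>

#define BUF_SM   0x00000000	/* values repeated from nicstar.h */
#define BUF_LG   0x00000001
#define BUF_NONE 0xffffffff

struct ns_skb_cb {
	u32 buf_type;		/* BUF_SM / BUF_LG / BUF_NONE */
};

#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))

/* Tag the buffer once, when it is allocated and queued... */
static void tag_small_buffer(struct sk_buff *sb)
{
	NS_SKB_CB(sb)->buf_type = BUF_SM;
}

/* ...and test the tag later, instead of comparing skb->list against
 * &card->sbpool.queue, which no longer exists. */
static int is_small_buffer(struct sk_buff *skb)
{
	return NS_SKB_CB(skb)->buf_type == BUF_SM;
}
```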
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0..85fee9530fa9 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
| @@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); | |||
| 417 | chan = (here[3] & uPD98401_AAL5_CHAN) >> | 417 | chan = (here[3] & uPD98401_AAL5_CHAN) >> |
| 418 | uPD98401_AAL5_CHAN_SHIFT; | 418 | uPD98401_AAL5_CHAN_SHIFT; |
| 419 | if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { | 419 | if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { |
| 420 | int pos = ZATM_VCC(vcc)->pool; | ||
| 421 | |||
| 420 | vcc = zatm_dev->rx_map[chan]; | 422 | vcc = zatm_dev->rx_map[chan]; |
| 421 | if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool]) | 423 | if (skb == zatm_dev->last_free[pos]) |
| 422 | zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL; | 424 | zatm_dev->last_free[pos] = NULL; |
| 423 | skb_unlink(skb); | 425 | skb_unlink(skb, zatm_dev->pool + pos); |
| 424 | } | 426 | } |
| 425 | else { | 427 | else { |
| 426 | printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " | 428 | printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " |
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5..e8d2a340356d 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
| @@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb) | |||
| 158 | if (err) { | 158 | if (err) { |
| 159 | BT_ERR("%s bulk tx submit failed urb %p err %d", | 159 | BT_ERR("%s bulk tx submit failed urb %p err %d", |
| 160 | bfusb->hdev->name, urb, err); | 160 | bfusb->hdev->name, urb, err); |
| 161 | skb_unlink(skb); | 161 | skb_unlink(skb, &bfusb->pending_q); |
| 162 | usb_free_urb(urb); | 162 | usb_free_urb(urb); |
| 163 | } else | 163 | } else |
| 164 | atomic_inc(&bfusb->pending_tx); | 164 | atomic_inc(&bfusb->pending_tx); |
| @@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs) | |||
| 212 | 212 | ||
| 213 | read_lock(&bfusb->lock); | 213 | read_lock(&bfusb->lock); |
| 214 | 214 | ||
| 215 | skb_unlink(skb); | 215 | skb_unlink(skb, &bfusb->pending_q); |
| 216 | skb_queue_tail(&bfusb->completed_q, skb); | 216 | skb_queue_tail(&bfusb->completed_q, skb); |
| 217 | 217 | ||
| 218 | bfusb_tx_wakeup(bfusb); | 218 | bfusb_tx_wakeup(bfusb); |
| @@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb) | |||
| 253 | if (err) { | 253 | if (err) { |
| 254 | BT_ERR("%s bulk rx submit failed urb %p err %d", | 254 | BT_ERR("%s bulk rx submit failed urb %p err %d", |
| 255 | bfusb->hdev->name, urb, err); | 255 | bfusb->hdev->name, urb, err); |
| 256 | skb_unlink(skb); | 256 | skb_unlink(skb, &bfusb->pending_q); |
| 257 | kfree_skb(skb); | 257 | kfree_skb(skb); |
| 258 | usb_free_urb(urb); | 258 | usb_free_urb(urb); |
| 259 | } | 259 | } |
| @@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs) | |||
| 398 | buf += len; | 398 | buf += len; |
| 399 | } | 399 | } |
| 400 | 400 | ||
| 401 | skb_unlink(skb); | 401 | skb_unlink(skb, &bfusb->pending_q); |
| 402 | kfree_skb(skb); | 402 | kfree_skb(skb); |
| 403 | 403 | ||
| 404 | bfusb_rx_submit(bfusb, urb); | 404 | bfusb_rx_submit(bfusb, urb); |
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4..d633770fac8e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
| @@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode, | |||
| 681 | return; | 681 | return; |
| 682 | } | 682 | } |
| 683 | 683 | ||
| 684 | __skb_unlink(skb, skb->list); | 684 | __skb_unlink(skb, &host->pending_packet_queue); |
| 685 | 685 | ||
| 686 | if (packet->state == hpsb_queued) { | 686 | if (packet->state == hpsb_queued) { |
| 687 | packet->sendtime = jiffies; | 687 | packet->sendtime = jiffies; |
| @@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque) | |||
| 989 | packet = (struct hpsb_packet *)skb->data; | 989 | packet = (struct hpsb_packet *)skb->data; |
| 990 | 990 | ||
| 991 | if (time_before(packet->sendtime + expire, jiffies)) { | 991 | if (time_before(packet->sendtime + expire, jiffies)) { |
| 992 | __skb_unlink(skb, skb->list); | 992 | __skb_unlink(skb, &host->pending_packet_queue); |
| 993 | packet->state = hpsb_complete; | 993 | packet->state = hpsb_complete; |
| 994 | packet->ack_code = ACKX_TIMEOUT; | 994 | packet->ack_code = ACKX_TIMEOUT; |
| 995 | queue_packet_complete(packet); | 995 | queue_packet_complete(packet); |
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f983..6ae6eb322111 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
| @@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) { | |||
| 606 | if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && | 606 | if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) && |
| 607 | (m->msg.data_b3_req.blocknr == blocknr)) { | 607 | (m->msg.data_b3_req.blocknr == blocknr)) { |
| 608 | /* found corresponding DATA_B3_REQ */ | 608 | /* found corresponding DATA_B3_REQ */ |
| 609 | skb_unlink(tmp); | 609 | skb_unlink(tmp, &card->ackq); |
| 610 | chan->queued -= m->msg.data_b3_req.datalen; | 610 | chan->queued -= m->msg.data_b3_req.datalen; |
| 611 | if (m->msg.data_b3_req.flags) | 611 | if (m->msg.data_b3_req.flags) |
| 612 | ret = m->msg.data_b3_req.datalen; | 612 | ret = m->msg.data_b3_req.datalen; |
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6f..221354eea21f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
| @@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 156 | 156 | ||
| 157 | SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); | 157 | SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb); |
| 158 | 158 | ||
| 159 | #ifdef SHAPER_COMPLEX /* and broken.. */ | ||
| 160 | |||
| 161 | while(ptr && ptr!=(struct sk_buff *)&shaper->sendq) | ||
| 162 | { | ||
| 163 | if(ptr->pri<skb->pri | ||
| 164 | && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP) | ||
| 165 | { | ||
| 166 | struct sk_buff *tmp=ptr->prev; | ||
| 167 | |||
| 168 | /* | ||
| 169 | * It goes before us therefore we slip the length | ||
| 170 | * of the new frame. | ||
| 171 | */ | ||
| 172 | |||
| 173 | SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen; | ||
| 174 | SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen; | ||
| 175 | |||
| 176 | /* | ||
| 177 | * The packet may have slipped so far back it | ||
| 178 | * fell off. | ||
| 179 | */ | ||
| 180 | if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY) | ||
| 181 | { | ||
| 182 | skb_unlink(ptr); | ||
| 183 | dev_kfree_skb(ptr); | ||
| 184 | } | ||
| 185 | ptr=tmp; | ||
| 186 | } | ||
| 187 | else | ||
| 188 | break; | ||
| 189 | } | ||
| 190 | if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq) | ||
| 191 | skb_queue_head(&shaper->sendq,skb); | ||
| 192 | else | ||
| 193 | { | ||
| 194 | struct sk_buff *tmp; | ||
| 195 | /* | ||
| 196 | * Set the packet clock out time according to the | ||
| 197 | * frames ahead. Im sure a bit of thought could drop | ||
| 198 | * this loop. | ||
| 199 | */ | ||
| 200 | for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next) | ||
| 201 | SHAPERCB(skb)->shapeclock+=tmp->shapelen; | ||
| 202 | skb_append(ptr,skb); | ||
| 203 | } | ||
| 204 | #else | ||
| 205 | { | 159 | { |
| 206 | struct sk_buff *tmp; | 160 | struct sk_buff *tmp; |
| 207 | /* | 161 | /* |
| @@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 220 | } else | 174 | } else |
| 221 | skb_queue_tail(&shaper->sendq, skb); | 175 | skb_queue_tail(&shaper->sendq, skb); |
| 222 | } | 176 | } |
| 223 | #endif | 177 | |
| 224 | if(sh_debug) | 178 | if(sh_debug) |
| 225 | printk("Frame queued.\n"); | 179 | printk("Frame queued.\n"); |
| 226 | if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) | 180 | if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN) |
| @@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper) | |||
| 302 | * Pull the frame and get interrupts back on. | 256 | * Pull the frame and get interrupts back on. |
| 303 | */ | 257 | */ |
| 304 | 258 | ||
| 305 | skb_unlink(skb); | 259 | skb_unlink(skb, &shaper->sendq); |
| 306 | if (shaper->recovery < | 260 | if (shaper->recovery < |
| 307 | SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) | 261 | SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen) |
| 308 | shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; | 262 | shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen; |
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b..0497dbdb8631 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
| @@ -445,7 +445,7 @@ void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags); | |||
| 445 | void s508_s514_lock(sdla_t *card, unsigned long *smp_flags); | 445 | void s508_s514_lock(sdla_t *card, unsigned long *smp_flags); |
| 446 | 446 | ||
| 447 | unsigned short calc_checksum (char *, int); | 447 | unsigned short calc_checksum (char *, int); |
| 448 | static int setup_fr_header(struct sk_buff** skb, | 448 | static int setup_fr_header(struct sk_buff *skb, |
| 449 | struct net_device* dev, char op_mode); | 449 | struct net_device* dev, char op_mode); |
| 450 | 450 | ||
| 451 | 451 | ||
| @@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev) | |||
| 1372 | /* Move the if_header() code to here. By inserting frame | 1372 | /* Move the if_header() code to here. By inserting frame |
| 1373 | * relay header in if_header() we would break the | 1373 | * relay header in if_header() we would break the |
| 1374 | * tcpdump and other packet sniffers */ | 1374 | * tcpdump and other packet sniffers */ |
| 1375 | chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby); | 1375 | chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby); |
| 1376 | if (chan->fr_header_len < 0 ){ | 1376 | if (chan->fr_header_len < 0 ){ |
| 1377 | ++chan->ifstats.tx_dropped; | 1377 | ++chan->ifstats.tx_dropped; |
| 1378 | ++card->wandev.stats.tx_dropped; | 1378 | ++card->wandev.stats.tx_dropped; |
| @@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev, | |||
| 1597 | return 1; | 1597 | return 1; |
| 1598 | } | 1598 | } |
| 1599 | 1599 | ||
| 1600 | skb_unlink(skb); | ||
| 1601 | |||
| 1602 | chan->transmit_length = len; | 1600 | chan->transmit_length = len; |
| 1603 | chan->delay_skb = skb; | 1601 | chan->delay_skb = skb; |
| 1604 | 1602 | ||
| @@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card) | |||
| 4871 | } | 4869 | } |
| 4872 | } | 4870 | } |
| 4873 | 4871 | ||
| 4874 | static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, | 4872 | static int setup_fr_header(struct sk_buff *skb, struct net_device* dev, |
| 4875 | char op_mode) | 4873 | char op_mode) |
| 4876 | { | 4874 | { |
| 4877 | struct sk_buff *skb = *skb_orig; | ||
| 4878 | fr_channel_t *chan=dev->priv; | 4875 | fr_channel_t *chan=dev->priv; |
| 4879 | 4876 | ||
| 4880 | if (op_mode == WANPIPE){ | 4877 | if (op_mode == WANPIPE) { |
| 4881 | |||
| 4882 | chan->fr_header[0]=Q922_UI; | 4878 | chan->fr_header[0]=Q922_UI; |
| 4883 | 4879 | ||
| 4884 | switch (htons(skb->protocol)){ | 4880 | switch (htons(skb->protocol)){ |
| 4885 | |||
| 4886 | case ETH_P_IP: | 4881 | case ETH_P_IP: |
| 4887 | chan->fr_header[1]=NLPID_IP; | 4882 | chan->fr_header[1]=NLPID_IP; |
| 4888 | break; | 4883 | break; |
| @@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, | |||
| 4894 | } | 4889 | } |
| 4895 | 4890 | ||
| 4896 | /* If we are in bridging mode, we must apply | 4891 | /* If we are in bridging mode, we must apply |
| 4897 | * an Ethernet header */ | 4892 | * an Ethernet header |
| 4898 | if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){ | 4893 | */ |
| 4899 | 4894 | if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) { | |
| 4900 | |||
| 4901 | /* Encapsulate the packet as a bridged Ethernet frame. */ | 4895 | /* Encapsulate the packet as a bridged Ethernet frame. */ |
| 4902 | #ifdef DEBUG | 4896 | #ifdef DEBUG |
| 4903 | printk(KERN_INFO "%s: encapsulating skb for frame relay\n", | 4897 | printk(KERN_INFO "%s: encapsulating skb for frame relay\n", |
| 4904 | dev->name); | 4898 | dev->name); |
| 4905 | #endif | 4899 | #endif |
| 4906 | |||
| 4907 | chan->fr_header[0] = 0x03; | 4900 | chan->fr_header[0] = 0x03; |
| 4908 | chan->fr_header[1] = 0x00; | 4901 | chan->fr_header[1] = 0x00; |
| 4909 | chan->fr_header[2] = 0x80; | 4902 | chan->fr_header[2] = 0x80; |
| @@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev, | |||
| 4916 | /* Yuck. */ | 4909 | /* Yuck. */ |
| 4917 | skb->protocol = ETH_P_802_3; | 4910 | skb->protocol = ETH_P_802_3; |
| 4918 | return 8; | 4911 | return 8; |
| 4919 | |||
| 4920 | } | 4912 | } |
| 4921 | 4913 | ||
| 4922 | return 0; | 4914 | return 0; |
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b0..a2f67245f6da 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
| @@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net) | |||
| 2903 | * completion callbacks. 2.5 should have fixed those bugs... | 2903 | * completion callbacks. 2.5 should have fixed those bugs... |
| 2904 | */ | 2904 | */ |
| 2905 | 2905 | ||
| 2906 | static void defer_bh (struct usbnet *dev, struct sk_buff *skb) | 2906 | static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list) |
| 2907 | { | 2907 | { |
| 2908 | struct sk_buff_head *list = skb->list; | ||
| 2909 | unsigned long flags; | 2908 | unsigned long flags; |
| 2910 | 2909 | ||
| 2911 | spin_lock_irqsave (&list->lock, flags); | 2910 | spin_lock_irqsave(&list->lock, flags); |
| 2912 | __skb_unlink (skb, list); | 2911 | __skb_unlink(skb, list); |
| 2913 | spin_unlock (&list->lock); | 2912 | spin_unlock(&list->lock); |
| 2914 | spin_lock (&dev->done.lock); | 2913 | spin_lock(&dev->done.lock); |
| 2915 | __skb_queue_tail (&dev->done, skb); | 2914 | __skb_queue_tail(&dev->done, skb); |
| 2916 | if (dev->done.qlen == 1) | 2915 | if (dev->done.qlen == 1) |
| 2917 | tasklet_schedule (&dev->bh); | 2916 | tasklet_schedule(&dev->bh); |
| 2918 | spin_unlock_irqrestore (&dev->done.lock, flags); | 2917 | spin_unlock_irqrestore(&dev->done.lock, flags); |
| 2919 | } | 2918 | } |
| 2920 | 2919 | ||
| 2921 | /* some work can't be done in tasklets, so we use keventd | 2920 | /* some work can't be done in tasklets, so we use keventd |
| @@ -3120,7 +3119,7 @@ block: | |||
| 3120 | break; | 3119 | break; |
| 3121 | } | 3120 | } |
| 3122 | 3121 | ||
| 3123 | defer_bh (dev, skb); | 3122 | defer_bh(dev, skb, &dev->rxq); |
| 3124 | 3123 | ||
| 3125 | if (urb) { | 3124 | if (urb) { |
| 3126 | if (netif_running (dev->net) | 3125 | if (netif_running (dev->net) |
| @@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs) | |||
| 3490 | 3489 | ||
| 3491 | urb->dev = NULL; | 3490 | urb->dev = NULL; |
| 3492 | entry->state = tx_done; | 3491 | entry->state = tx_done; |
| 3493 | defer_bh (dev, skb); | 3492 | defer_bh(dev, skb, &dev->txq); |
| 3494 | } | 3493 | } |
| 3495 | 3494 | ||
| 3496 | /*-------------------------------------------------------------------------*/ | 3495 | /*-------------------------------------------------------------------------*/ |
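The usbnet defer_bh() change above shows the same idea for a helper that used to fetch the queue from skb->list: the caller passes the owning queue in, and the unlocked __skb_unlink() runs under that queue's own lock. Restated in isolation (the function and parameter names here are illustrative, not the driver's):

```c
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void move_to_done(struct sk_buff *skb, struct sk_buff_head *pending,
			 struct sk_buff_head *done)
{
	unsigned long flags;

	spin_lock_irqsave(&pending->lock, flags);
	__skb_unlink(skb, pending);	/* was __skb_unlink(skb, skb->list) */
	spin_unlock(&pending->lock);

	spin_lock(&done->lock);
	__skb_queue_tail(done, skb);
	spin_unlock_irqrestore(&done->lock, flags);
}
```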
