about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorFlorian Westphal <fw@strlen.de>2015-03-09 23:56:53 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-10 13:34:53 -0400
commit3855634deb051bbce155d149bca05b99a3528d5d (patch)
tree5d13aebf955116762baf832e84bc2ad842f2e21d /drivers
parent515fb5c317db991e2f9877936b3b21a35c1d3190 (diff)
drivers: atm: nicstar: remove ifdef'd out skb destructors
remove dead code. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/atm/nicstar.c 90
1 file changed, 0 insertions(+), 90 deletions(-)
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b7e1cc0a97c8..ddc4ceb85fc5 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -73,9 +73,6 @@
73#undef GENERAL_DEBUG 73#undef GENERAL_DEBUG
74#undef EXTRA_DEBUG 74#undef EXTRA_DEBUG
75 75
76#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
77 you're going to use only raw ATM */
78
79/* Do not touch these */ 76/* Do not touch these */
80 77
81#ifdef TX_DEBUG 78#ifdef TX_DEBUG
@@ -138,11 +135,6 @@ static void process_tsq(ns_dev * card);
138static void drain_scq(ns_dev * card, scq_info * scq, int pos); 135static void drain_scq(ns_dev * card, scq_info * scq, int pos);
139static void process_rsq(ns_dev * card); 136static void process_rsq(ns_dev * card);
140static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); 137static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
141#ifdef NS_USE_DESTRUCTORS
142static void ns_sb_destructor(struct sk_buff *sb);
143static void ns_lb_destructor(struct sk_buff *lb);
144static void ns_hb_destructor(struct sk_buff *hb);
145#endif /* NS_USE_DESTRUCTORS */
146static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 138static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
147static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); 139static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
148static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); 140static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
@@ -2169,9 +2161,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
2169 } else { 2161 } else {
2170 skb_put(skb, len); 2162 skb_put(skb, len);
2171 dequeue_sm_buf(card, skb); 2163 dequeue_sm_buf(card, skb);
2172#ifdef NS_USE_DESTRUCTORS
2173 skb->destructor = ns_sb_destructor;
2174#endif /* NS_USE_DESTRUCTORS */
2175 ATM_SKB(skb)->vcc = vcc; 2164 ATM_SKB(skb)->vcc = vcc;
2176 __net_timestamp(skb); 2165 __net_timestamp(skb);
2177 vcc->push(vcc, skb); 2166 vcc->push(vcc, skb);
@@ -2190,9 +2179,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
2190 } else { 2179 } else {
2191 skb_put(sb, len); 2180 skb_put(sb, len);
2192 dequeue_sm_buf(card, sb); 2181 dequeue_sm_buf(card, sb);
2193#ifdef NS_USE_DESTRUCTORS
2194 sb->destructor = ns_sb_destructor;
2195#endif /* NS_USE_DESTRUCTORS */
2196 ATM_SKB(sb)->vcc = vcc; 2182 ATM_SKB(sb)->vcc = vcc;
2197 __net_timestamp(sb); 2183 __net_timestamp(sb);
2198 vcc->push(vcc, sb); 2184 vcc->push(vcc, sb);
@@ -2208,9 +2194,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
2208 atomic_inc(&vcc->stats->rx_drop); 2194 atomic_inc(&vcc->stats->rx_drop);
2209 } else { 2195 } else {
2210 dequeue_lg_buf(card, skb); 2196 dequeue_lg_buf(card, skb);
2211#ifdef NS_USE_DESTRUCTORS
2212 skb->destructor = ns_lb_destructor;
2213#endif /* NS_USE_DESTRUCTORS */
2214 skb_push(skb, NS_SMBUFSIZE); 2197 skb_push(skb, NS_SMBUFSIZE);
2215 skb_copy_from_linear_data(sb, skb->data, 2198 skb_copy_from_linear_data(sb, skb->data,
2216 NS_SMBUFSIZE); 2199 NS_SMBUFSIZE);
@@ -2322,9 +2305,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
2322 card->index); 2305 card->index);
2323#endif /* EXTRA_DEBUG */ 2306#endif /* EXTRA_DEBUG */
2324 ATM_SKB(hb)->vcc = vcc; 2307 ATM_SKB(hb)->vcc = vcc;
2325#ifdef NS_USE_DESTRUCTORS
2326 hb->destructor = ns_hb_destructor;
2327#endif /* NS_USE_DESTRUCTORS */
2328 __net_timestamp(hb); 2308 __net_timestamp(hb);
2329 vcc->push(vcc, hb); 2309 vcc->push(vcc, hb);
2330 atomic_inc(&vcc->stats->rx); 2310 atomic_inc(&vcc->stats->rx);
@@ -2337,68 +2317,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
2337 2317
2338} 2318}
2339 2319
2340#ifdef NS_USE_DESTRUCTORS
2341
2342static void ns_sb_destructor(struct sk_buff *sb)
2343{
2344 ns_dev *card;
2345 u32 stat;
2346
2347 card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2348 stat = readl(card->membase + STAT);
2349 card->sbfqc = ns_stat_sfbqc_get(stat);
2350 card->lbfqc = ns_stat_lfbqc_get(stat);
2351
2352 do {
2353 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2354 if (sb == NULL)
2355 break;
2356 NS_PRV_BUFTYPE(sb) = BUF_SM;
2357 skb_queue_tail(&card->sbpool.queue, sb);
2358 skb_reserve(sb, NS_AAL0_HEADER);
2359 push_rxbufs(card, sb);
2360 } while (card->sbfqc < card->sbnr.min);
2361}
2362
2363static void ns_lb_destructor(struct sk_buff *lb)
2364{
2365 ns_dev *card;
2366 u32 stat;
2367
2368 card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2369 stat = readl(card->membase + STAT);
2370 card->sbfqc = ns_stat_sfbqc_get(stat);
2371 card->lbfqc = ns_stat_lfbqc_get(stat);
2372
2373 do {
2374 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2375 if (lb == NULL)
2376 break;
2377 NS_PRV_BUFTYPE(lb) = BUF_LG;
2378 skb_queue_tail(&card->lbpool.queue, lb);
2379 skb_reserve(lb, NS_SMBUFSIZE);
2380 push_rxbufs(card, lb);
2381 } while (card->lbfqc < card->lbnr.min);
2382}
2383
2384static void ns_hb_destructor(struct sk_buff *hb)
2385{
2386 ns_dev *card;
2387
2388 card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2389
2390 while (card->hbpool.count < card->hbnr.init) {
2391 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2392 if (hb == NULL)
2393 break;
2394 NS_PRV_BUFTYPE(hb) = BUF_NONE;
2395 skb_queue_tail(&card->hbpool.queue, hb);
2396 card->hbpool.count++;
2397 }
2398}
2399
2400#endif /* NS_USE_DESTRUCTORS */
2401
2402static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) 2320static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
2403{ 2321{
2404 if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { 2322 if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
@@ -2427,9 +2345,6 @@ static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
2427static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) 2345static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2428{ 2346{
2429 skb_unlink(sb, &card->sbpool.queue); 2347 skb_unlink(sb, &card->sbpool.queue);
2430#ifdef NS_USE_DESTRUCTORS
2431 if (card->sbfqc < card->sbnr.min)
2432#else
2433 if (card->sbfqc < card->sbnr.init) { 2348 if (card->sbfqc < card->sbnr.init) {
2434 struct sk_buff *new_sb; 2349 struct sk_buff *new_sb;
2435 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { 2350 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2440,7 +2355,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2440 } 2355 }
2441 } 2356 }
2442 if (card->sbfqc < card->sbnr.init) 2357 if (card->sbfqc < card->sbnr.init)
2443#endif /* NS_USE_DESTRUCTORS */
2444 { 2358 {
2445 struct sk_buff *new_sb; 2359 struct sk_buff *new_sb;
2446 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { 2360 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@@ -2455,9 +2369,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2455static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) 2369static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
2456{ 2370{
2457 skb_unlink(lb, &card->lbpool.queue); 2371 skb_unlink(lb, &card->lbpool.queue);
2458#ifdef NS_USE_DESTRUCTORS
2459 if (card->lbfqc < card->lbnr.min)
2460#else
2461 if (card->lbfqc < card->lbnr.init) { 2372 if (card->lbfqc < card->lbnr.init) {
2462 struct sk_buff *new_lb; 2373 struct sk_buff *new_lb;
2463 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { 2374 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
@@ -2468,7 +2379,6 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
2468 } 2379 }
2469 } 2380 }
2470 if (card->lbfqc < card->lbnr.init) 2381 if (card->lbfqc < card->lbnr.init)
2471#endif /* NS_USE_DESTRUCTORS */
2472 { 2382 {
2473 struct sk_buff *new_lb; 2383 struct sk_buff *new_lb;
2474 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { 2384 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {