author     Roland Dreier <rolandd@cisco.com>             2007-09-28 18:33:51 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-10 19:53:41 -0400
commit     de90351219a1f1fd3cb45cf6fcc4e9d6407fd2c9
tree       5fd6193eb233e900452f719fd56d7065b2fb939e
parent     587117414909e9c52f50e3c9d1f85b3dc1815d75
[IPoIB]: Convert to netdevice internal stats
Use the stats member of struct net_device in IPoIB, so we can save
memory by deleting the stats member of struct ipoib_dev_priv, and save
code by deleting ipoib_get_stats().
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       | 22
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  | 10
5 files changed, 31 insertions, 41 deletions
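
The shape of the change, before reading it hunk by hunk: every `priv->stats.*` counter bump becomes `dev->stats.*`, the private `struct net_device_stats` member is deleted, and the driver's `get_stats` hook goes away because alloc_netdev() already installs a default hook that returns `&dev->stats`. The sketch below shows that pattern for a hypothetical "foo" driver against the 2.6.23-era netdev interface; it is illustrative only, not code from this patch:

/* Hypothetical "foo" driver: sketch of converting to netdevice
 * internal stats (2.6.23-era API; not part of this patch). */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	/* Before the conversion this struct held its own counters:
	 *	struct net_device_stats stats;
	 * returned from a per-driver foo_get_stats() hook.
	 * Both are deleted by the conversion. */
	unsigned long foo_flags;	/* remaining private state */
};

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* After the conversion, bump the counters embedded in
	 * struct net_device itself. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void foo_setup(struct net_device *dev)
{
	dev->hard_start_xmit = foo_xmit;
	/* No dev->get_stats assignment: the default installed by
	 * alloc_netdev() returns &dev->stats, so the per-driver
	 * accessor boilerplate can be dropped. */
}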
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 35f3ca42bd60..34c6128d2a34 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -280,8 +280,6 @@ struct ipoib_dev_priv {
 
 	struct ib_event_handler event_handler;
 
-	struct net_device_stats stats;
-
 	struct net_device *parent;
 	struct list_head child_intfs;
 	struct list_head list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 08b4676a3820..1afd93cdd6bb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		ipoib_dbg(priv, "cm recv error "
 			  "(status=%d, wrid=%d vend_err %x)\n",
 			  wc->status, wr_id, wc->vendor_err);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		 * this packet and reuse the old buffer.
 		 */
 		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(skb->len > tx->mtu)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, tx->mtu);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
 		return;
 	}
@@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	tx_req->skb = skb;
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 			       addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
@@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 481e4b6bd949..0ec28c302fbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	 * this packet and reuse the old buffer.
 	 */
 	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-		++priv->stats.rx_dropped;
+		++dev->stats.rx_dropped;
 		goto repost;
 	}
 
@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IPOIB_ENCAP_LEN);
 
 	dev->last_rx = jiffies;
-	++priv->stats.rx_packets;
-	priv->stats.rx_bytes += skb->len;
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += skb->len;
 
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	ib_dma_unmap_single(priv->ca, tx_req->mapping,
 			    tx_req->skb->len, DMA_TO_DEVICE);
 
-	++priv->stats.tx_packets;
-	priv->stats.tx_bytes += tx_req->skb->len;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += tx_req->skb->len;
 
 	dev_kfree_skb_any(tx_req->skb);
 
@@ -362,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++priv->stats.tx_dropped;
-		++priv->stats.tx_errors;
+		++dev->stats.tx_dropped;
+		++dev->stats.tx_errors;
 		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
 		return;
 	}
@@ -383,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
 				 DMA_TO_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -392,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
-		++priv->stats.tx_errors;
+		++dev->stats.tx_errors;
 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2bd76ef57154..6b1b4b2ec5ba 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -517,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
 	if (!neigh) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
@@ -582,7 +582,7 @@ err_list:
 err_path:
 	ipoib_neigh_free(dev, neigh);
 err_drop:
-	++priv->stats.tx_dropped;
+	++dev->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
 	spin_unlock(&priv->lock);
@@ -631,7 +631,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		} else
 			__path_add(dev, path);
 	} else {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -650,7 +650,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		skb_push(skb, sizeof *phdr);
 		__skb_queue_tail(&path->queue, skb);
 	} else {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -718,7 +718,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			__skb_queue_tail(&neigh->queue, skb);
 			spin_unlock(&priv->lock);
 		} else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 	} else {
@@ -744,7 +744,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				   IPOIB_QPN(phdr->hwaddr),
 				   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 			dev_kfree_skb_any(skb);
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			goto out;
 		}
 
@@ -758,13 +758,6 @@ out:
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	return &priv->stats;
-}
-
 static void ipoib_timeout(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -865,7 +858,7 @@ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
 	struct sk_buff *skb;
 	*to_ipoib_neigh(neigh->neighbour) = NULL;
 	while ((skb = __skb_dequeue(&neigh->queue))) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 	}
 	if (ipoib_cm_get(neigh))
@@ -952,7 +945,6 @@ static void ipoib_setup(struct net_device *dev)
 	dev->stop = ipoib_stop;
 	dev->change_mtu = ipoib_change_mtu;
 	dev->hard_start_xmit = ipoib_start_xmit;
-	dev->get_stats = ipoib_get_stats;
 	dev->tx_timeout = ipoib_timeout;
 	dev->header_ops = &ipoib_header_ops;
 	dev->set_multicast_list = ipoib_set_mcast_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aae367057a56..98e904a7f3e8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -125,7 +125,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 	}
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
-	priv->stats.tx_dropped += tx_dropped;
+	dev->stats.tx_dropped += tx_dropped;
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	kfree(mcast);
@@ -320,7 +320,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 	/* Flush out any queued packets */
 	spin_lock_irq(&priv->tx_lock);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 	}
 	spin_unlock_irq(&priv->tx_lock);
@@ -675,7 +675,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
 	    !priv->broadcast ||
 	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-		++priv->stats.tx_dropped;
+		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
@@ -690,7 +690,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (!mcast) {
 			ipoib_warn(priv, "unable to allocate memory for "
 				   "multicast structure\n");
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 			goto out;
 		}
@@ -705,7 +705,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
 			skb_queue_tail(&mcast->pkt_queue, skb);
 		else {
-			++priv->stats.tx_dropped;
+			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
 