about summary refs log tree commit diff stats
path: root/drivers/net/tulip/de4x5.c
diff options
context:
space:
mode:
author: David S. Miller <davem@davemloft.net> 2008-09-23 03:19:19 -0400
committer: David S. Miller <davem@davemloft.net> 2008-09-23 03:19:19 -0400
commit 2aad7c8e9b4d1cabdeac2469e7276e9daef12fa0 (patch)
tree fc4b0ee0b534942e833982c31f28ad74eefa4d77 /drivers/net/tulip/de4x5.c
parent ebf059821ed8a36acd706484b719d14d212ada32 (diff)
de4x5: Use skb_queue_head instead of by-hand implementation.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tulip/de4x5.c')
-rw-r--r-- drivers/net/tulip/de4x5.c | 38
1 file changed, 8 insertions(+), 30 deletions(-)
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 617ef41bdfea..6444cbec0bdc 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -832,7 +832,7 @@ struct de4x5_private {
832 s32 csr14; /* Saved SIA TX/RX Register */ 832 s32 csr14; /* Saved SIA TX/RX Register */
833 s32 csr15; /* Saved SIA General Register */ 833 s32 csr15; /* Saved SIA General Register */
834 int save_cnt; /* Flag if state already saved */ 834 int save_cnt; /* Flag if state already saved */
835 struct sk_buff *skb; /* Save the (re-ordered) skb's */ 835 struct sk_buff_head queue; /* Save the (re-ordered) skb's */
836 } cache; 836 } cache;
837 struct de4x5_srom srom; /* A copy of the SROM */ 837 struct de4x5_srom srom; /* A copy of the SROM */
838 int cfrv; /* Card CFRV copy */ 838 int cfrv; /* Card CFRV copy */
@@ -1128,6 +1128,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1128 printk(" which has an Ethernet PROM CRC error.\n"); 1128 printk(" which has an Ethernet PROM CRC error.\n");
1129 return -ENXIO; 1129 return -ENXIO;
1130 } else { 1130 } else {
1131 skb_queue_head_init(&lp->cache.queue);
1131 lp->cache.gepc = GEP_INIT; 1132 lp->cache.gepc = GEP_INIT;
1132 lp->asBit = GEP_SLNK; 1133 lp->asBit = GEP_SLNK;
1133 lp->asPolarity = GEP_SLNK; 1134 lp->asPolarity = GEP_SLNK;
@@ -1487,7 +1488,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1487 } 1488 }
1488 } else if (skb->len > 0) { 1489 } else if (skb->len > 0) {
1489 /* If we already have stuff queued locally, use that first */ 1490 /* If we already have stuff queued locally, use that first */
1490 if (lp->cache.skb && !lp->interrupt) { 1491 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1491 de4x5_put_cache(dev, skb); 1492 de4x5_put_cache(dev, skb);
1492 skb = de4x5_get_cache(dev); 1493 skb = de4x5_get_cache(dev);
1493 } 1494 }
@@ -1580,7 +1581,7 @@ de4x5_interrupt(int irq, void *dev_id)
1580 1581
1581 /* Load the TX ring with any locally stored packets */ 1582 /* Load the TX ring with any locally stored packets */
1582 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { 1583 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1583 while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) { 1584 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1584 de4x5_queue_pkt(de4x5_get_cache(dev), dev); 1585 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1585 } 1586 }
1586 lp->cache.lock = 0; 1587 lp->cache.lock = 0;
@@ -3679,11 +3680,7 @@ de4x5_free_tx_buffs(struct net_device *dev)
3679 } 3680 }
3680 3681
3681 /* Unload the locally queued packets */ 3682 /* Unload the locally queued packets */
3682 while (lp->cache.skb) { 3683 __skb_queue_purge(&lp->cache.queue);
3683 dev_kfree_skb(de4x5_get_cache(dev));
3684 }
3685
3686 return;
3687} 3684}
3688 3685
3689/* 3686/*
@@ -3781,43 +3778,24 @@ static void
3781de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) 3778de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3782{ 3779{
3783 struct de4x5_private *lp = netdev_priv(dev); 3780 struct de4x5_private *lp = netdev_priv(dev);
3784 struct sk_buff *p;
3785
3786 if (lp->cache.skb) {
3787 for (p=lp->cache.skb; p->next; p=p->next);
3788 p->next = skb;
3789 } else {
3790 lp->cache.skb = skb;
3791 }
3792 skb->next = NULL;
3793 3781
3794 return; 3782 __skb_queue_tail(&lp->cache.queue, skb);
3795} 3783}
3796 3784
3797static void 3785static void
3798de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) 3786de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3799{ 3787{
3800 struct de4x5_private *lp = netdev_priv(dev); 3788 struct de4x5_private *lp = netdev_priv(dev);
3801 struct sk_buff *p = lp->cache.skb;
3802
3803 lp->cache.skb = skb;
3804 skb->next = p;
3805 3789
3806 return; 3790 __skb_queue_head(&lp->cache.queue, skb);
3807} 3791}
3808 3792
3809static struct sk_buff * 3793static struct sk_buff *
3810de4x5_get_cache(struct net_device *dev) 3794de4x5_get_cache(struct net_device *dev)
3811{ 3795{
3812 struct de4x5_private *lp = netdev_priv(dev); 3796 struct de4x5_private *lp = netdev_priv(dev);
3813 struct sk_buff *p = lp->cache.skb;
3814 3797
3815 if (p) { 3798 return __skb_dequeue(&lp->cache.queue);
3816 lp->cache.skb = p->next;
3817 p->next = NULL;
3818 }
3819
3820 return p;
3821} 3799}
3822 3800
3823/* 3801/*