diff options
author | David S. Miller <davem@davemloft.net> | 2009-05-28 19:46:29 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-05-28 19:46:29 -0400 |
commit | de1033428baf1940bbbbf9e66b073ee0a577f5e0 (patch) | |
tree | af7bde2123719b3618bacf06ce8e6f57c7804e49 /net/econet/af_econet.c | |
parent | bec571ec762a4cf855ad4446f833086fc154b60e (diff) |
econet: Use SKB queue and list helpers instead of doing it by-hand.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/econet/af_econet.c')
-rw-r--r-- | net/econet/af_econet.c | 18 |
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 6f479fa522c3..8121bf0029e3 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -901,15 +901,10 @@ static void aun_tx_ack(unsigned long seq, int result) | |||
901 | struct ec_cb *eb; | 901 | struct ec_cb *eb; |
902 | 902 | ||
903 | spin_lock_irqsave(&aun_queue_lock, flags); | 903 | spin_lock_irqsave(&aun_queue_lock, flags); |
904 | skb = skb_peek(&aun_queue); | 904 | skb_queue_walk(&aun_queue, skb) { |
905 | while (skb && skb != (struct sk_buff *)&aun_queue) | ||
906 | { | ||
907 | struct sk_buff *newskb = skb->next; | ||
908 | eb = (struct ec_cb *)&skb->cb; | 905 | eb = (struct ec_cb *)&skb->cb; |
909 | if (eb->seq == seq) | 906 | if (eb->seq == seq) |
910 | goto foundit; | 907 | goto foundit; |
911 | |||
912 | skb = newskb; | ||
913 | } | 908 | } |
914 | spin_unlock_irqrestore(&aun_queue_lock, flags); | 909 | spin_unlock_irqrestore(&aun_queue_lock, flags); |
915 | printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); | 910 | printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); |
@@ -982,23 +977,18 @@ static void aun_data_available(struct sock *sk, int slen) | |||
982 | 977 | ||
983 | static void ab_cleanup(unsigned long h) | 978 | static void ab_cleanup(unsigned long h) |
984 | { | 979 | { |
985 | struct sk_buff *skb; | 980 | struct sk_buff *skb, *n; |
986 | unsigned long flags; | 981 | unsigned long flags; |
987 | 982 | ||
988 | spin_lock_irqsave(&aun_queue_lock, flags); | 983 | spin_lock_irqsave(&aun_queue_lock, flags); |
989 | skb = skb_peek(&aun_queue); | 984 | skb_queue_walk_safe(&aun_queue, skb, n) { |
990 | while (skb && skb != (struct sk_buff *)&aun_queue) | ||
991 | { | ||
992 | struct sk_buff *newskb = skb->next; | ||
993 | struct ec_cb *eb = (struct ec_cb *)&skb->cb; | 985 | struct ec_cb *eb = (struct ec_cb *)&skb->cb; |
994 | if ((jiffies - eb->start) > eb->timeout) | 986 | if ((jiffies - eb->start) > eb->timeout) { |
995 | { | ||
996 | tx_result(skb->sk, eb->cookie, | 987 | tx_result(skb->sk, eb->cookie, |
997 | ECTYPE_TRANSMIT_NOT_PRESENT); | 988 | ECTYPE_TRANSMIT_NOT_PRESENT); |
998 | skb_unlink(skb, &aun_queue); | 989 | skb_unlink(skb, &aun_queue); |
999 | kfree_skb(skb); | 990 | kfree_skb(skb); |
1000 | } | 991 | } |
1001 | skb = newskb; | ||
1002 | } | 992 | } |
1003 | spin_unlock_irqrestore(&aun_queue_lock, flags); | 993 | spin_unlock_irqrestore(&aun_queue_lock, flags); |
1004 | 994 | ||