author     Thomas Graf <tgraf@suug.ch>           2005-11-05 15:14:06 -0500
committer  Thomas Graf <tgr@axs.localdomain>     2005-11-05 16:02:25 -0500
commit     9e178ff27cd9187babe86dc80ef766b722c88da6 (patch)
tree       035c1fe70783a17a8535638be0306cd8a9844a20
parent     6b31b28a441c9ba33889f88ac1d9451ed9532ada (diff)
[PKT_SCHED]: RED: Use generic queue management interface
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
-rw-r--r--   net/sched/sch_red.c   42
1 file changed, 13 insertions(+), 29 deletions(-)
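For context on the helpers being adopted: this patch replaces RED's open-coded queue and statistics bookkeeping with the generic queue management interface named in the subject. The snippet below is only a sketch of what the enqueue-side helpers are assumed to do, reconstructed from the bookkeeping this patch deletes from sch_red.c; the authoritative inline definitions (presumably in include/net/sch_generic.h) may differ in detail.

/*
 * Sketch, not the real definitions: behaviour inferred from the
 * open-coded sequences removed in the diff below. Relies on struct
 * Qdisc, the __skb_queue_*()/kfree_skb() helpers and the NET_XMIT_*
 * codes from the usual networking headers.
 */
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        __skb_queue_tail(&sch->q, skb);         /* append to the qdisc's queue */
        sch->qstats.backlog += skb->len;        /* account queued bytes */
        sch->bstats.bytes += skb->len;          /* byte/packet counters */
        sch->bstats.packets++;
        return NET_XMIT_SUCCESS;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);                         /* free the dropped packet */
        sch->qstats.drops++;                    /* count the drop */
        return NET_XMIT_DROP;
}

With helpers like these, the hard-limit and early-drop paths in red_enqueue() collapse to a single call each, as the first hunk shows.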
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0dabcc9091be..d5e934c33f96 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -105,22 +105,14 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                         break;
         }
 
-        if (sch->qstats.backlog + skb->len <= q->limit) {
-                __skb_queue_tail(&sch->q, skb);
-                sch->qstats.backlog += skb->len;
-                sch->bstats.bytes += skb->len;
-                sch->bstats.packets++;
-                return NET_XMIT_SUCCESS;
-        }
+        if (sch->qstats.backlog + skb->len <= q->limit)
+                return qdisc_enqueue_tail(skb, sch);
 
         q->stats.pdrop++;
-        kfree_skb(skb);
-        sch->qstats.drops++;
-        return NET_XMIT_DROP;
+        return qdisc_drop(skb, sch);
 
 congestion_drop:
-        kfree_skb(skb);
-        sch->qstats.drops++;
+        qdisc_drop(skb, sch);
         return NET_XMIT_CN;
 }
 
@@ -132,10 +124,7 @@ red_requeue(struct sk_buff *skb, struct Qdisc* sch)
         if (red_is_idling(&q->parms))
                 red_end_of_idle_period(&q->parms);
 
-        __skb_queue_head(&sch->q, skb);
-        sch->qstats.backlog += skb->len;
-        sch->qstats.requeues++;
-        return 0;
+        return qdisc_requeue(skb, sch);
 }
 
 static struct sk_buff *
@@ -144,14 +133,12 @@ red_dequeue(struct Qdisc* sch)
         struct sk_buff *skb;
         struct red_sched_data *q = qdisc_priv(sch);
 
-        skb = __skb_dequeue(&sch->q);
-        if (skb) {
-                sch->qstats.backlog -= skb->len;
-                return skb;
-        }
+        skb = qdisc_dequeue_head(sch);
 
-        red_start_of_idle_period(&q->parms);
-        return NULL;
+        if (skb == NULL)
+                red_start_of_idle_period(&q->parms);
+
+        return skb;
 }
 
 static unsigned int red_drop(struct Qdisc* sch)
@@ -159,13 +146,11 @@ static unsigned int red_drop(struct Qdisc* sch)
         struct sk_buff *skb;
         struct red_sched_data *q = qdisc_priv(sch);
 
-        skb = __skb_dequeue_tail(&sch->q);
+        skb = qdisc_dequeue_tail(sch);
         if (skb) {
                 unsigned int len = skb->len;
-                sch->qstats.backlog -= len;
-                sch->qstats.drops++;
                 q->stats.other++;
-                kfree_skb(skb);
+                qdisc_drop(skb, sch);
                 return len;
         }
 
@@ -177,8 +162,7 @@ static void red_reset(struct Qdisc* sch)
 {
         struct red_sched_data *q = qdisc_priv(sch);
 
-        __skb_queue_purge(&sch->q);
-        sch->qstats.backlog = 0;
+        qdisc_reset_queue(sch);
         red_restart(&q->parms);
 }
 
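The dequeue, requeue and reset hunks follow the same pattern. Again as a sketch only, inferred from the removed lines above rather than taken from sch_generic.h, the remaining helpers would look roughly like this:

/* Sketch: semantics reconstructed from the bookkeeping removed above. */
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __skb_dequeue(&sch->q);

        if (skb)
                sch->qstats.backlog -= skb->len;        /* queue shrinks */
        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        struct sk_buff *skb = __skb_dequeue_tail(&sch->q);

        if (skb)
                sch->qstats.backlog -= skb->len;
        return skb;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        __skb_queue_head(&sch->q, skb);         /* put the packet back in front */
        sch->qstats.backlog += skb->len;
        sch->qstats.requeues++;
        return 0;                               /* success, as in the removed open-coded path */
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __skb_queue_purge(&sch->q);             /* free everything still queued */
        sch->qstats.backlog = 0;
}

The net effect of the conversion is that backlog, byte/packet and drop accounting is centralized in the generic helpers instead of being repeated in every qdisc, while sch_red.c keeps only the RED-specific pieces (q->stats.pdrop, q->stats.other and the idle-period handling).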