author	Thomas Graf <tgraf@suug.ch>	2005-11-05 15:14:21 -0500
committer	Thomas Graf <tgr@axs.localdomain>	2005-11-05 16:02:28 -0500
commit	18e3fb84e698dcab1c5fa7b7c89921b826bb5620 (patch)
tree	68b7d3907cc72a83cf7ad6e1f7f27f9c6ffd06be /net
parent	716a1b40b0ed630570edd4e2bf9053c421e9770b (diff)
[PKT_SCHED]: GRED: Improve error handling and messages
Try to enqueue packets even if we cannot associate them with a VQ; this
basically means that the default VQ has not been set up yet.

We must check that the VQ still exists while requeueing, since the VQ
might have been changed between the dequeue and the requeue of the
underlying qdisc.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
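For context on the lookup this patch hardens: the low bits of skb->tc_index
select the GRED virtual queue (DP), and an out-of-range or unconfigured DP
must fall back to the default DP before the packet can be passed through or
dropped. A minimal standalone C sketch of that decision, using simplified
stand-in types rather than the kernel's (MAX_DPS, struct vq, struct
gred_sched and lookup_vq here are hypothetical names for illustration):

	/* Hedged sketch of the DP lookup logic; simplified stand-ins,
	 * not the kernel source.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define MAX_DPS		16
	#define GRED_VQ_MASK	(MAX_DPS - 1)

	struct vq { int backlog; };

	struct gred_sched {
		struct vq *tab[MAX_DPS];	/* NULL slot = unconfigured DP */
		unsigned int DPs;		/* number of DPs in use */
		unsigned int def;		/* default DP for unclassified skbs */
	};

	/* The low bits of tc_index select the virtual queue (DP). */
	static uint16_t tc_index_to_dp(uint16_t tc_index)
	{
		return tc_index & GRED_VQ_MASK;
	}

	/* Mirrors the patched gred_enqueue() decision: fall back to the
	 * default DP, and only pass through or drop when that is missing too.
	 */
	static struct vq *lookup_vq(struct gred_sched *t, uint16_t tc_index)
	{
		uint16_t dp = tc_index_to_dp(tc_index);

		if (dp >= t->DPs || t->tab[dp] == NULL)
			dp = t->def;

		if (dp >= t->DPs || t->tab[dp] == NULL)
			return NULL;	/* no default either: pass through or drop */

		return t->tab[dp];
	}

	int main(void)
	{
		struct vq v0 = { 0 };
		struct gred_sched t = { .tab = { &v0 }, .DPs = 4, .def = 0 };

		/* tc_index 0x7 selects DP 7, which is out of range here,
		 * so the lookup falls back to the default DP 0.
		 */
		printf("got vq? %s\n", lookup_vq(&t, 0x7) ? "yes" : "no");
		return 0;
	}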
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_gred.c	68
1 file changed, 44 insertions, 24 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 38dab959feed..646dbdc4ef29 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -176,20 +176,24 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	dp = tc_index_to_dp(skb);
 
 	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
-		printk("GRED: setting to default (%d)\n ",t->def);
-		if (!(q=t->tab[t->def])) {
-			DPRINTK("GRED: setting to default FAILED! dropping!! "
-				"(%d)\n ", t->def);
-			goto drop;
+		dp = t->def;
+
+		if ((q = t->tab[dp]) == NULL) {
+			/* Pass through packets not assigned to a DP
+			 * if no default DP has been configured. This
+			 * allows for DP flows to be left untouched.
+			 */
+			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+				return qdisc_enqueue_tail(skb, sch);
+			else
+				goto drop;
 		}
+
 		/* fix tc_index? --could be controvesial but needed for
 		   requeueing */
-		skb->tc_index=(skb->tc_index & ~GRED_VQ_MASK) | t->def;
+		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
 
-	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
-		"general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
-		sch->qstats.backlog);
 	/* sum up all the qaves of prios <= to ours to get the new qave*/
 	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
 		for (i=0;i<t->DPs;i++) {
@@ -254,13 +258,20 @@ static int
 gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched *t = qdisc_priv(sch);
-	struct gred_sched_data *q = t->tab[tc_index_to_dp(skb)];
-/* error checking here -- probably unnecessary */
+	struct gred_sched_data *q;
+	u16 dp = tc_index_to_dp(skb);
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
+			       "for requeue, screwing up backlog.\n",
+			       tc_index_to_dp(skb));
+	} else {
+		if (red_is_idling(&q->parms))
+			red_end_of_idle_period(&q->parms);
+		q->backlog += skb->len;
+	}
 
-	q->backlog += skb->len;
 	return qdisc_requeue(skb, sch);
 }
 
@@ -274,15 +285,20 @@ gred_dequeue(struct Qdisc* sch)
 	skb = qdisc_dequeue_head(sch);
 
 	if (skb) {
-		q = t->tab[tc_index_to_dp(skb)];
-		if (q) {
+		u16 dp = tc_index_to_dp(skb);
+
+		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+			if (net_ratelimit())
+				printk(KERN_WARNING "GRED: Unable to relocate "
+				       "VQ 0x%x after dequeue, screwing up "
+				       "backlog.\n", tc_index_to_dp(skb));
+		} else {
 			q->backlog -= skb->len;
+
 			if (!q->backlog && !gred_wred_mode(t))
 				red_start_of_idle_period(&q->parms);
-		} else {
-			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
-				tc_index_to_dp(skb));
 		}
+
 		return skb;
 	}
 
@@ -308,15 +324,19 @@ static unsigned int gred_drop(struct Qdisc* sch)
 	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
-		q = t->tab[tc_index_to_dp(skb)];
-		if (q) {
+		u16 dp = tc_index_to_dp(skb);
+
+		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+			if (net_ratelimit())
+				printk(KERN_WARNING "GRED: Unable to relocate "
+				       "VQ 0x%x while dropping, screwing up "
+				       "backlog.\n", tc_index_to_dp(skb));
+		} else {
 			q->backlog -= len;
 			q->stats.other++;
+
 			if (!q->backlog && !gred_wred_mode(t))
 				red_start_of_idle_period(&q->parms);
-		} else {
-			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
-				tc_index_to_dp(skb));
 		}
 
 		qdisc_drop(skb, sch);
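The requeue, dequeue and drop hunks all guard against the same race the
changelog describes: a tc configuration change can shrink t->DPs or clear a
tab[] slot between a dequeue and the subsequent requeue by an outer qdisc,
so the DP must be revalidated before its backlog is touched. A condensed
standalone C sketch of that guarded accounting pattern (simplified stand-in
types, not the kernel structures; revalidate_vq and account are hypothetical
names):

	/* Sketch of the guarded backlog accounting the patch converges on;
	 * all types are simplified stand-ins, not kernel code.
	 */
	#include <stddef.h>
	#include <stdint.h>

	struct vq { int backlog; };

	struct sched {
		struct vq *tab[16];	/* NULL slot = VQ was torn down */
		unsigned int DPs;
	};

	/* Returns the VQ only if the DP is still in range and configured;
	 * a tc change between dequeue and requeue can invalidate both.
	 */
	static struct vq *revalidate_vq(struct sched *t, uint16_t dp)
	{
		if (dp >= t->DPs || t->tab[dp] == NULL)
			return NULL;	/* warn (ratelimited) and skip, as the patch does */
		return t->tab[dp];
	}

	static void account(struct sched *t, uint16_t dp, int delta)
	{
		struct vq *q = revalidate_vq(t, dp);

		if (q)
			q->backlog += delta;	/* negative delta on dequeue/drop */
	}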