Diffstat (limited to 'net/sched/sch_sfq.c')
-rw-r--r--  net/sched/sch_sfq.c  71
1 file changed, 10 insertions(+), 61 deletions(-)
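
The patch below replaces SFQ's ->requeue() operation with a ->peek() operation: instead of pushing an already-dequeued skb back onto the head of its flow queue, the qdisc now lets callers look at the packet the next dequeue would return, without unlinking it. A minimal sketch of the contract a ->peek() caller relies on; the helper name next_pkt_len() is hypothetical, not part of this patch:

/* Hypothetical caller: ->peek() must be side-effect free, returning the
 * skb the next ->dequeue() would hand out, or NULL when the qdisc is
 * empty.  qdisc_pkt_len() is the stock helper also used in this diff. */
static unsigned int next_pkt_len(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->ops->peek(sch);

	return skb ? qdisc_pkt_len(skb) : 0;
}
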
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index fe1508ef0d3d..f3965df00559 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -281,7 +281,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned int hash;
 	sfq_index x;
-	int ret;
+	int uninitialized_var(ret);
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
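
Note: the hunk above only quiets a compiler warning, since gcc cannot see that sfq_classify() always writes ret on the path where it is later read. The kernel's uninitialized_var() annotation of that era was essentially a self-assignment, roughly the sketch below:

/* Roughly as defined in include/linux/compiler-gcc.h at the time:
 * self-initialization convinces gcc the variable is set without
 * generating any extra code. */
#define uninitialized_var(x) x = x
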
@@ -329,71 +329,20 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_CN;
 }
 
-static int
-sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
+static struct sk_buff *
+sfq_peek(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned int hash;
-	sfq_index x;
-	int ret;
-
-	hash = sfq_classify(skb, sch, &ret);
-	if (hash == 0) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-	hash--;
+	sfq_index a;
 
-	x = q->ht[hash];
-	if (x == SFQ_DEPTH) {
-		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-		q->hash[x] = hash;
-	}
-
-	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__skb_queue_head(&q->qs[x], skb);
-	/* If selected queue has length q->limit+1, this means that
-	 * all another queues are empty and we do simple tail drop.
-	 * This packet is still requeued at head of queue, tail packet
-	 * is dropped.
-	 */
-	if (q->qs[x].qlen > q->limit) {
-		skb = q->qs[x].prev;
-		__skb_unlink(skb, &q->qs[x]);
-		sch->qstats.drops++;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
-		kfree_skb(skb);
-		return NET_XMIT_CN;
-	}
-
-	sfq_inc(q, x);
-	if (q->qs[x].qlen == 1) {		/* The flow is new */
-		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
-			q->tail = x;
-			q->next[x] = x;
-			q->allot[x] = q->quantum;
-		} else {
-			q->next[x] = q->next[q->tail];
-			q->next[q->tail] = x;
-			q->tail = x;
-		}
-	}
+	/* No active slots */
+	if (q->tail == SFQ_DEPTH)
+		return NULL;
 
-	if (++sch->q.qlen <= q->limit) {
-		sch->qstats.requeues++;
-		return 0;
-	}
-
-	sch->qstats.drops++;
-	sfq_drop(sch);
-	return NET_XMIT_CN;
-}
-
-
-
-
+	a = q->next[q->tail];
+	return skb_peek(&q->qs[a]);
+}
 
 static struct sk_buff *
 sfq_dequeue(struct Qdisc *sch)
 {
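
The new sfq_peek() is O(1): q->next[q->tail] names the flow at the head of the current round, and skb_peek() returns that flow's first skb without unlinking it (a slot on the round list is never empty, so NULL is only returned when there are no active slots at all). The snippet below, an illustration rather than code from this patch, shows why a peek and a following dequeue agree on the packet:

	/* Illustration: both operations start from the flow after q->tail,
	 * so the skb sfq_peek() reports is the one dequeue unlinks next. */
	a = q->next[q->tail];            /* head-of-round flow slot   */
	skb = skb_peek(&q->qs[a]);       /* peek: skb stays queued    */
	skb = __skb_dequeue(&q->qs[a]);  /* dequeue: unlinks same skb */
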
@@ -624,7 +573,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
 	.priv_size	=	sizeof(struct sfq_sched_data),
 	.enqueue	=	sfq_enqueue,
 	.dequeue	=	sfq_dequeue,
-	.requeue	=	sfq_requeue,
+	.peek		=	sfq_peek,
 	.drop		=	sfq_drop,
 	.init		=	sfq_init,
 	.reset		=	sfq_reset,