 net/sched/sch_sfq.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a2f88fea6d8..b76d46b71466 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -67,7 +67,7 @@
 
 	IMPLEMENTATION:
 	This implementation limits maximal queue length to 128;
-	maximal mtu to 2^15-1; max 128 flows, number of hash buckets to 1024.
+	max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
 	The only goal of this restrictions was that all data
 	fit into one 4K page on 32bit arches.
 
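The new 2^18-1 figure appears to come from the allot scaling added below: allot stays a 16-bit field, but each unit now stands for 1<<3 = 8 bytes, so the largest byte count it can represent is roughly (2^15 - 1) * 2^3, i.e. just under 2^18 bytes.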
@@ -77,6 +77,11 @@
 #define SFQ_SLOTS		128 /* max number of flows */
 #define SFQ_EMPTY_SLOT		255
 #define SFQ_HASH_DIVISOR	1024
+/* We use 16 bits to store allot, and want to handle packets up to 64K
+ * Scale allot by 8 (1<<3) so that no overflow occurs.
+ */
+#define SFQ_ALLOT_SHIFT		3
+#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
 
 /* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
 typedef unsigned char sfq_index;
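As a side note (not part of the patch), a minimal user-space sketch of what the new macros compute; DIV_ROUND_UP is the usual kernel helper, reproduced here with its include/linux/kernel.h definition:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

int main(void)
{
	/* a small frame, a typical Ethernet frame, and a maximal 64K packet */
	unsigned int sizes[] = { 64, 1514, 65535 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5u bytes -> %4u allot units\n",
		       sizes[i], SFQ_ALLOT_SIZE(sizes[i]));

	/* prints 8, 190 and 8192 units: even a 64K packet now fits
	 * comfortably in the 16-bit allot field */
	return 0;
}

The DIV_ROUND_UP rounding means a packet is charged at most 7 bytes more than its true length per dequeue.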
@@ -115,7 +120,7 @@ struct sfq_sched_data
 	struct timer_list perturb_timer;
 	u32		perturbation;
 	sfq_index	cur_depth;	/* depth of longest slot */
-
+	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
 	struct sfq_slot *tail;		/* current slot in round */
 	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
 	struct sfq_slot	slots[SFQ_SLOTS];
@@ -395,7 +400,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			q->tail->next = x;
 		}
 		q->tail = slot;
-		slot->allot = q->quantum;
+		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -431,8 +436,14 @@ sfq_dequeue(struct Qdisc *sch)
 	if (q->tail == NULL)
 		return NULL;
 
+next_slot:
 	a = q->tail->next;
 	slot = &q->slots[a];
+	if (slot->allot <= 0) {
+		q->tail = slot;
+		slot->allot += q->scaled_quantum;
+		goto next_slot;
+	}
 	skb = slot_dequeue_head(slot);
 	sfq_dec(q, a);
 	sch->q.qlen--;
@@ -447,9 +458,8 @@ sfq_dequeue(struct Qdisc *sch)
 			return skb;
 		}
 		q->tail->next = next_a;
-	} else if ((slot->allot -= qdisc_pkt_len(skb)) <= 0) {
-		q->tail = slot;
-		slot->allot += q->quantum;
+	} else {
+		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
 	}
 	return skb;
 }
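The net effect of the two dequeue hunks: a slot keeps being served while its credit is positive, and a slot whose credit has gone non-positive (for instance after sending a packet larger than its remaining allot, possibly a 64K one) is refilled and skipped until the deficit is repaid. A toy user-space model of that credit loop for two flows, illustrative only (no kernel API involved, packet sizes chosen arbitrarily):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

struct toy_slot {
	short allot;	/* credit in 8-byte units, may go negative */
	int pkt_len;	/* every packet of this flow has this length */
};

int main(void)
{
	unsigned short scaled_quantum = SFQ_ALLOT_SIZE(1514);
	struct toy_slot slots[2] = {
		{ SFQ_ALLOT_SIZE(1514), 65535 },	/* flow 0: 64K packets */
		{ SFQ_ALLOT_SIZE(1514), 1514 },		/* flow 1: MTU packets */
	};
	unsigned int cur = 0, i;

	for (i = 0; i < 12; i++) {
		struct toy_slot *slot = &slots[cur];

		if (slot->allot <= 0) {
			/* credit exhausted: refill and move to the next flow,
			 * like the new next_slot: path in sfq_dequeue() */
			slot->allot += scaled_quantum;
			cur ^= 1;
			continue;
		}
		slot->allot -= SFQ_ALLOT_SIZE(slot->pkt_len);
		printf("flow %u sends %5d bytes, %d units of credit left\n",
		       cur, slot->pkt_len, slot->allot);
	}
	return 0;
}

Here flow 0's single 64K packet leaves it roughly 8000 units in deficit, so flow 1 keeps being served for the next several dozen rounds while flow 0 pays the deficit back one quantum (190 units) at a time; long-run byte shares still converge to the configured quantum.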
@@ -485,6 +495,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 
 	sch_tree_lock(sch);
 	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
+	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
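For the default quantum of one psched_mtu-sized packet (typically 1514 bytes on a 1500-MTU Ethernet device), scaled_quantum works out to DIV_ROUND_UP(1514, 8) = 190 units per round.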
@@ -525,6 +536,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->tail = NULL;
 	if (opt == NULL) {
 		q->quantum = psched_mtu(qdisc_dev(sch));
+		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
 		q->perturb_period = 0;
 		q->perturbation = net_random();
 	} else {
@@ -617,7 +629,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	if (idx != SFQ_EMPTY_SLOT) {
 		const struct sfq_slot *slot = &q->slots[idx];
 
-		xstats.allot = slot->allot;
+		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
 		qs.qlen = slot->qlen;
 		slot_queue_walk(slot, skb)
 			qs.backlog += qdisc_pkt_len(skb);
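Because allot is now kept in 8-byte units, the dump path shifts it back so userspace still sees bytes; the reported value therefore has 8-byte granularity, e.g. an internal credit of 190 units is shown as 190 << 3 = 1520 bytes.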