diff options
| author | Patrick McHardy <kaber@trash.net> | 2008-01-31 21:37:16 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-01-31 22:28:35 -0500 |
| commit | 94de78d19580143c407ff2492edf2410d0e7d48c (patch) | |
| tree | 2753a5638e500101ec286fd5a02f042b4a1da129 | |
| parent | 7d2681a6ff4f9ab5e48d02550b4c6338f1638998 (diff) | |
[NET_SCHED]: sch_sfq: make internal queues visible as classes
Add support for dumping statistics and make internal queues visible as
classes.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | include/linux/pkt_sched.h | 5 | ||||
| -rw-r--r-- | net/sched/sch_sfq.c | 41 |
2 files changed, 45 insertions, 1 deletion
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 32761352e858..dbb7ac37960d 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h | |||
| @@ -150,6 +150,11 @@ struct tc_sfq_qopt | |||
| 150 | unsigned flows; /* Maximal number of flows */ | 150 | unsigned flows; /* Maximal number of flows */ |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | struct tc_sfq_xstats | ||
| 154 | { | ||
| 155 | __s32 allot; | ||
| 156 | }; | ||
| 157 | |||
| 153 | /* | 158 | /* |
| 154 | * NOTE: limit, divisor and flows are hardwired to code at the moment. | 159 | * NOTE: limit, divisor and flows are hardwired to code at the moment. |
| 155 | * | 160 | * |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d818d1985cca..a20e2ef7704b 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
| @@ -566,15 +566,54 @@ static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) | |||
| 566 | return &q->filter_list; | 566 | return &q->filter_list; |
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | static int sfq_dump_class(struct Qdisc *sch, unsigned long cl, | ||
| 570 | struct sk_buff *skb, struct tcmsg *tcm) | ||
| 571 | { | ||
| 572 | tcm->tcm_handle |= TC_H_MIN(cl); | ||
| 573 | return 0; | ||
| 574 | } | ||
| 575 | |||
| 576 | static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | ||
| 577 | struct gnet_dump *d) | ||
| 578 | { | ||
| 579 | struct sfq_sched_data *q = qdisc_priv(sch); | ||
| 580 | sfq_index idx = q->ht[cl-1]; | ||
| 581 | struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen }; | ||
| 582 | struct tc_sfq_xstats xstats = { .allot = q->allot[idx] }; | ||
| 583 | |||
| 584 | if (gnet_stats_copy_queue(d, &qs) < 0) | ||
| 585 | return -1; | ||
| 586 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | ||
| 587 | } | ||
| 588 | |||
| 569 | static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 589 | static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
| 570 | { | 590 | { |
| 571 | return; | 591 | struct sfq_sched_data *q = qdisc_priv(sch); |
| 592 | unsigned int i; | ||
| 593 | |||
| 594 | if (arg->stop) | ||
| 595 | return; | ||
| 596 | |||
| 597 | for (i = 0; i < SFQ_HASH_DIVISOR; i++) { | ||
| 598 | if (q->ht[i] == SFQ_DEPTH || | ||
| 599 | arg->count < arg->skip) { | ||
| 600 | arg->count++; | ||
| 601 | continue; | ||
| 602 | } | ||
| 603 | if (arg->fn(sch, i + 1, arg) < 0) { | ||
| 604 | arg->stop = 1; | ||
| 605 | break; | ||
| 606 | } | ||
| 607 | arg->count++; | ||
| 608 | } | ||
| 572 | } | 609 | } |
| 573 | 610 | ||
| 574 | static const struct Qdisc_class_ops sfq_class_ops = { | 611 | static const struct Qdisc_class_ops sfq_class_ops = { |
| 575 | .get = sfq_get, | 612 | .get = sfq_get, |
| 576 | .change = sfq_change_class, | 613 | .change = sfq_change_class, |
| 577 | .tcf_chain = sfq_find_tcf, | 614 | .tcf_chain = sfq_find_tcf, |
| 615 | .dump = sfq_dump_class, | ||
| 616 | .dump_stats = sfq_dump_class_stats, | ||
| 578 | .walk = sfq_walk, | 617 | .walk = sfq_walk, |
| 579 | }; | 618 | }; |
| 580 | 619 | ||
