author		Patrick McHardy <kaber@trash.net>	2008-01-31 21:37:16 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-31 22:28:35 -0500
commit		94de78d19580143c407ff2492edf2410d0e7d48c (patch)
tree		2753a5638e500101ec286fd5a02f042b4a1da129
parent		7d2681a6ff4f9ab5e48d02550b4c6338f1638998 (diff)
[NET_SCHED]: sch_sfq: make internal queues visible as classes
Add support for dumping statistics and make internal queues visible as
classes.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
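The class identifiers follow a simple convention: hash bucket i is exposed
as class minor i + 1, since minor 0 cannot serve as a class handle. A
hedged sketch of that mapping (sfq_classid() and sfq_bucket() are
illustrative names, not part of the patch; TC_H_MAKE()/TC_H_MIN() are the
standard handle macros from <linux/rtnetlink.h>):

#include <linux/rtnetlink.h>

/* Illustrative only: the bucket <-> class mapping implied by the patch.
 * sfq_walk() reports bucket i as class i + 1, and sfq_dump_class_stats()
 * undoes it with q->ht[cl - 1]. */
static inline __u32 sfq_classid(__u32 qdisc_handle, unsigned int bucket)
{
	return TC_H_MAKE(qdisc_handle, bucket + 1);
}

static inline unsigned int sfq_bucket(unsigned long cl)
{
	return cl - 1;
}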
-rw-r--r--	include/linux/pkt_sched.h	5
-rw-r--r--	net/sched/sch_sfq.c	41
2 files changed, 45 insertions(+), 1 deletion(-)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 32761352e858..dbb7ac37960d 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -150,6 +150,11 @@ struct tc_sfq_qopt
 	unsigned	flows;		/* Maximal number of flows  */
 };
 
+struct tc_sfq_xstats
+{
+	__s32		allot;
+};
+
 /*
  * NOTE: limit, divisor and flows are hardwired to code at the moment.
  *
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d818d1985cca..a20e2ef7704b 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -566,15 +566,54 @@ static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
 	return &q->filter_list;
 }
 
+static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	return 0;
+}
+
+static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				struct gnet_dump *d)
+{
+	struct sfq_sched_data *q = qdisc_priv(sch);
+	sfq_index idx = q->ht[cl-1];
+	struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
+	struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };
+
+	if (gnet_stats_copy_queue(d, &qs) < 0)
+		return -1;
+	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
 static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
-	return;
+	struct sfq_sched_data *q = qdisc_priv(sch);
+	unsigned int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+		if (q->ht[i] == SFQ_DEPTH ||
+		    arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, i + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
 }
 
 static const struct Qdisc_class_ops sfq_class_ops = {
 	.get		=	sfq_get,
 	.change		=	sfq_change_class,
 	.tcf_chain	=	sfq_find_tcf,
+	.dump		=	sfq_dump_class,
+	.dump_stats	=	sfq_dump_class_stats,
 	.walk		=	sfq_walk,
 };
 
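For context, a sketch of how a userspace dumper along the lines of
iproute2 might print the xstats blob that sfq_dump_class_stats() emits via
gnet_stats_copy_app(). sfq_print_xstats() and its signature are
assumptions for illustration; netlink attribute parsing is left to the
caller:

#include <stdio.h>
#include <linux/pkt_sched.h>

/* Hypothetical helper: interpret a TCA_XSTATS payload of the form
 * produced above.  xstats/len are the raw attribute data. */
static int sfq_print_xstats(FILE *f, const void *xstats, unsigned int len)
{
	const struct tc_sfq_xstats *st = xstats;

	if (len < sizeof(*st))
		return -1;
	/* allot: bytes this flow may still send in the current
	 * round-robin round. */
	fprintf(f, " allot %d\n", st->allot);
	return 0;
}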