author    Patrick McHardy <kaber@trash.net>  2007-07-03 01:48:13 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-07-11 01:16:39 -0400
commit    ee39e10c27ca5293c72addb95bff864095e19904 (patch)
tree      ab88b149b0aee405933e55a1bdb3806b3253a481 /net/sched/sch_htb.c
parent    4bdf39911e7a887c4499161422423cbaf16684e8 (diff)
[NET_SCHED]: sch_htb: use generic estimator
Use the generic estimator instead of reimplementing (parts of) it. For
compatibility always create a default estimator for new classes.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
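For reference, the lifecycle of the generic estimator that this patch switches
HTB to looks roughly like the sketch below. It is condensed from the hunks in
this diff against the 2.6.22-era API; the wrapper function
example_estimator_lifecycle() is hypothetical and only groups the three calls
that in the real patch live in htb_change_class() and htb_destroy_class().

#include <linux/rtnetlink.h>	/* struct rtattr, RTA_LENGTH, TCA_RATE */
#include <net/gen_stats.h>	/* gen_new_estimator() and friends */

static void example_estimator_lifecycle(struct Qdisc *sch,
					struct htb_class *cl,
					struct rtattr **tca)
{
	/* Default parameters used when userspace supplies no TCA_RATE
	 * attribute (the compatibility case the changelog mentions);
	 * values taken verbatim from this patch. */
	struct {
		struct rtattr		rta;
		struct gnet_estimator	opt;
	} est = {
		.rta = {
			.rta_len	= RTA_LENGTH(sizeof(est.opt)),
			.rta_type	= TCA_RATE,
		},
		.opt = {
			/* 4s interval, 16s averaging constant */
			.interval	= 2,
			.ewma_log	= 2,
		},
	};

	/* Class creation: the estimator periodically folds cl->bstats
	 * (byte/packet counters) into cl->rate_est (bps/pps), replacing
	 * the hand-rolled HTB_RATECM timer removed by this patch. */
	gen_new_estimator(&cl->bstats, &cl->rate_est,
			  &sch->dev->queue_lock,
			  tca[TCA_RATE-1] ? : &est.rta);

	/* Class change: replace only if userspace gave new parameters. */
	if (tca[TCA_RATE-1])
		gen_replace_estimator(&cl->bstats, &cl->rate_est,
				      &sch->dev->queue_lock,
				      tca[TCA_RATE-1]);

	/* Class teardown: detach the estimator again. */
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
}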
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c	85
1 file changed, 24 insertions(+), 61 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 035788c5b7f8..26f81b848bfd 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -69,8 +69,6 @@
  */
 
 #define HTB_HSIZE 16		/* classid hash size */
-#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#define HTB_RATECM 1		/* whether to use rate computer */
 #define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
 #define HTB_VER 0x30011		/* major must be matched with number suplied by TC as version */
 
@@ -95,12 +93,6 @@ struct htb_class {
 	struct tc_htb_xstats xstats;	/* our special stats */
 	int refcnt;		/* usage count of this class */
 
-#ifdef HTB_RATECM
-	/* rate measurement counters */
-	unsigned long rate_bytes, sum_bytes;
-	unsigned long rate_packets, sum_packets;
-#endif
-
 	/* topology */
 	int level;		/* our level (see above) */
 	struct htb_class *parent;	/* parent class */
@@ -194,10 +186,6 @@ struct htb_sched {
 	int rate2quantum;	/* quant = rate / rate2quantum */
 	psched_time_t now;	/* cached dequeue time */
 	struct qdisc_watchdog watchdog;
-#ifdef HTB_RATECM
-	struct timer_list rttim;	/* rate computer timer */
-	int recmp_bucket;	/* which hash bucket to recompute next */
-#endif
 
 	/* non shaped skbs; let them go directly thru */
 	struct sk_buff_head direct_queue;
@@ -677,34 +665,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-#ifdef HTB_RATECM
-#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
-static void htb_rate_timer(unsigned long arg)
-{
-	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct htb_sched *q = qdisc_priv(sch);
-	struct hlist_node *p;
-	struct htb_class *cl;
-
-
-	/* lock queue so that we can muck with it */
-	spin_lock_bh(&sch->dev->queue_lock);
-
-	q->rttim.expires = jiffies + HZ;
-	add_timer(&q->rttim);
-
-	/* scan and recompute one bucket at time */
-	if (++q->recmp_bucket >= HTB_HSIZE)
-		q->recmp_bucket = 0;
-
-	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
-		RT_GEN(cl->sum_bytes, cl->rate_bytes);
-		RT_GEN(cl->sum_packets, cl->rate_packets);
-	}
-	spin_unlock_bh(&sch->dev->queue_lock);
-}
-#endif
-
 /**
  * htb_charge_class - charges amount "bytes" to leaf and ancestors
  *
@@ -750,11 +710,6 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-#ifdef HTB_RATECM
-	/* update rate counters */
-	cl->sum_bytes += bytes;
-	cl->sum_packets++;
-#endif
 
 	/* update byte stats except for leaves which are already updated */
 	if (cl->level) {
@@ -1095,13 +1050,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
 
-#ifdef HTB_RATECM
-	init_timer(&q->rttim);
-	q->rttim.function = htb_rate_timer;
-	q->rttim.data = (unsigned long)sch;
-	q->rttim.expires = jiffies + HZ;
-	add_timer(&q->rttim);
-#endif
 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
 		q->rate2quantum = 1;
 	q->defcls = gopt->defcls;
@@ -1175,11 +1123,6 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
 
-#ifdef HTB_RATECM
-	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
-	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
-#endif
-
 	if (!cl->level && cl->un.leaf.q)
 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
 	cl->xstats.tokens = cl->tokens;
@@ -1277,6 +1220,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 		BUG_TRAP(cl->un.leaf.q);
 		qdisc_destroy(cl->un.leaf.q);
 	}
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 	qdisc_put_rtab(cl->rate);
 	qdisc_put_rtab(cl->ceil);
 
@@ -1305,9 +1249,6 @@ static void htb_destroy(struct Qdisc *sch)
 	struct htb_sched *q = qdisc_priv(sch);
 
 	qdisc_watchdog_cancel(&q->watchdog);
-#ifdef HTB_RATECM
-	del_timer_sync(&q->rttim);
-#endif
 	/* This line used to be after htb_destroy_class call below
 	   and surprisingly it worked in 2.4. But it must precede it
 	   because filter need its target class alive to be able to call
@@ -1403,6 +1344,20 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
 		int prio;
+		struct {
+			struct rtattr		rta;
+			struct gnet_estimator	opt;
+		} est = {
+			.rta = {
+				.rta_len	= RTA_LENGTH(sizeof(est.opt)),
+				.rta_type	= TCA_RATE,
+			},
+			.opt = {
+				/* 4s interval, 16s averaging constant */
+				.interval	= 2,
+				.ewma_log	= 2,
+			},
+		};
 
 		/* check for valid classid */
 		if (!classid || TC_H_MAJ(classid ^ sch->handle)
@@ -1418,6 +1373,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
 			goto failure;
 
+		gen_new_estimator(&cl->bstats, &cl->rate_est,
+				  &sch->dev->queue_lock,
+				  tca[TCA_RATE-1] ? : &est.rta);
 		cl->refcnt = 1;
 		INIT_LIST_HEAD(&cl->sibling);
 		INIT_HLIST_NODE(&cl->hlist);
@@ -1469,8 +1427,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
 		list_add_tail(&cl->sibling,
 			      parent ? &parent->children : &q->root);
-	} else
+	} else {
+		if (tca[TCA_RATE-1])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+					      &sch->dev->queue_lock,
+					      tca[TCA_RATE-1]);
 		sch_tree_lock(sch);
+	}
 
 	/* it used to be a nasty bug here, we have to check that node
 	   is really leaf before changing cl->un.leaf ! */