Diffstat (limited to 'net')
 net/sched/Kconfig       | 12 ------------
 net/sched/act_api.c     |  6 ------
 net/sched/act_police.c  | 18 ------------------
 net/sched/sch_api.c     |  6 ------
 net/sched/sch_cbq.c     |  8 --------
 net/sched/sch_generic.c |  2 --
 net/sched/sch_hfsc.c    |  8 --------
 7 files changed, 0 insertions(+), 60 deletions(-)
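
In short: this patch kills the NET_ESTIMATOR Kconfig option and strips every #ifdef CONFIG_NET_ESTIMATOR guard from net/sched, so the generic rate-estimator hooks are compiled in unconditionally whenever NET_SCHED is enabled. Every hunk below repeats the same mechanical pattern, sketched here with one call taken from act_api.c (surrounding context trimmed):

	/* Before: estimator teardown compiled only when the option was set. */
	#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est);
	#endif

	/* After: the guard is gone and the call is made unconditionally. */
	gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est);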
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f3217942ca87..b4662888bdbd 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -286,7 +286,6 @@ config CLS_U32_MARK
 config NET_CLS_RSVP
 	tristate "IPv4 Resource Reservation Protocol (RSVP)"
 	select NET_CLS
-	select NET_ESTIMATOR
 	---help---
 	  The Resource Reservation Protocol (RSVP) permits end systems to
 	  request a minimum and maximum data flow rate for a connection; this
@@ -301,7 +300,6 @@ config NET_CLS_RSVP
 config NET_CLS_RSVP6
 	tristate "IPv6 Resource Reservation Protocol (RSVP6)"
 	select NET_CLS
-	select NET_ESTIMATOR
 	---help---
 	  The Resource Reservation Protocol (RSVP) permits end systems to
 	  request a minimum and maximum data flow rate for a connection; this
@@ -393,7 +391,6 @@ config NET_EMATCH_TEXT
 
 config NET_CLS_ACT
 	bool "Actions"
-	select NET_ESTIMATOR
 	---help---
 	  Say Y here if you want to use traffic control actions. Actions
 	  get attached to classifiers and are invoked after a successful
@@ -476,7 +473,6 @@ config NET_ACT_SIMP
 config NET_CLS_POLICE
 	bool "Traffic Policing (obsolete)"
 	depends on NET_CLS_ACT!=y
-	select NET_ESTIMATOR
 	---help---
 	  Say Y here if you want to do traffic policing, i.e. strict
 	  bandwidth limiting. This option is obsoleted by the traffic
@@ -491,14 +487,6 @@ config NET_CLS_IND
 	  classification based on the incoming device. This option is
 	  likely to disappear in favour of the metadata ematch.
 
-config NET_ESTIMATOR
-	bool "Rate estimator"
-	---help---
-	  Say Y here to allow using rate estimators to estimate the current
-	  rate-of-flow for network devices, queues, etc. This module is
-	  automatically selected if needed but can be selected manually for
-	  statistical purposes.
-
 endif # NET_SCHED
 
 endmenu
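
A note on the Kconfig mechanics above: "select NET_ESTIMATOR" forced the estimator symbol to y whenever the selecting option was enabled, so the estimator core was already built on any configuration that used these classifiers, actions or policing; only a NET_SCHED config enabling none of the selectors could leave it out. With the symbol deleted, the pattern disappears entirely. For illustration only, a hypothetical entry (NET_FOO is not in the tree) would previously have pulled the estimator in like this:

	# Hypothetical example, not part of this patch or the tree.
	config NET_FOO
		tristate "Foo classifier"
		select NET_CLS
		# The next line is the pattern this patch removes everywhere:
		select NET_ESTIMATOR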
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 711dd26c95c3..72bb9bd1a22a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -42,10 +42,8 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 			write_lock_bh(hinfo->lock);
 			*p1p = p->tcfc_next;
 			write_unlock_bh(hinfo->lock);
-#ifdef CONFIG_NET_ESTIMATOR
 			gen_kill_estimator(&p->tcfc_bstats,
 					   &p->tcfc_rate_est);
-#endif
 			kfree(p);
 			return;
 		}
@@ -236,11 +234,9 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti
 	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
 	p->tcfc_tm.install = jiffies;
 	p->tcfc_tm.lastuse = jiffies;
-#ifdef CONFIG_NET_ESTIMATOR
 	if (est)
 		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
 				  p->tcfc_stats_lock, est);
-#endif
 	a->priv = (void *) p;
 	return p;
 }
@@ -614,9 +610,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 		goto errout;
 
 	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
 		goto errout;
 
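
Taken together, the three act_api.c hunks above trace the estimator lifecycle that now runs unconditionally for every action. A minimal sketch of that flow, using only calls and fields visible in the hunks (locking and hash bookkeeping elided):

	/* 1. Creation (tcf_hash_create): if userspace supplied a rate-estimator
	 *    attribute, start estimating over the action's byte/packet counters. */
	if (est)
		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
				  p->tcfc_stats_lock, est);

	/* 2. Stats dump (tcf_action_copy_stats): the estimated rate is copied
	 *    out alongside the basic and queue counters; any failure aborts. */
	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	/* 3. Teardown (tcf_hash_destroy): unregister before freeing. */
	gen_kill_estimator(&p->tcfc_bstats, &p->tcfc_rate_est);
	kfree(p);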
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 616f465f407e..580698db578a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,10 +118,8 @@ void tcf_police_destroy(struct tcf_police *p)
 			write_lock_bh(&police_lock);
 			*p1p = p->tcf_next;
 			write_unlock_bh(&police_lock);
-#ifdef CONFIG_NET_ESTIMATOR
 			gen_kill_estimator(&p->tcf_bstats,
 					   &p->tcf_rate_est);
-#endif
 			if (p->tcfp_R_tab)
 				qdisc_put_rtab(p->tcfp_R_tab);
 			if (p->tcfp_P_tab)
@@ -227,7 +225,6 @@ override:
 	police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
 	police->tcf_action = parm->action;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1])
 		police->tcfp_ewma_rate =
 			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
@@ -235,7 +232,6 @@ override:
 		gen_replace_estimator(&police->tcf_bstats,
 				      &police->tcf_rate_est,
 				      police->tcf_stats_lock, est);
-#endif
 
 	spin_unlock_bh(&police->tcf_lock);
 	if (ret != ACT_P_CREATED)
@@ -281,14 +277,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 	police->tcf_bstats.bytes += skb->len;
 	police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
 		police->tcf_qstats.overlimits++;
 		spin_unlock(&police->tcf_lock);
 		return police->tcf_action;
 	}
-#endif
 
 	if (skb->len <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
@@ -348,10 +342,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	if (police->tcfp_result)
 		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
 			&police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate)
 		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
 	return skb->len;
 
 rtattr_failure:
@@ -477,14 +469,12 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 			goto failure;
 		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
 	}
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1]) {
 		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
 			goto failure;
 		police->tcfp_ewma_rate =
 			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
 	}
-#endif
 	police->tcfp_toks = police->tcfp_burst = parm->burst;
 	police->tcfp_mtu = parm->mtu;
 	if (police->tcfp_mtu == 0) {
@@ -498,11 +488,9 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 	police->tcf_index = parm->index ? parm->index :
 		tcf_police_new_index();
 	police->tcf_action = parm->action;
-#ifdef CONFIG_NET_ESTIMATOR
 	if (est)
 		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
 				  police->tcf_stats_lock, est);
-#endif
 	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	write_lock_bh(&police_lock);
 	police->tcf_next = tcf_police_ht[h];
@@ -528,14 +516,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
 	police->tcf_bstats.bytes += skb->len;
 	police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
 		police->tcf_qstats.overlimits++;
 		spin_unlock(&police->tcf_lock);
 		return police->tcf_action;
 	}
-#endif
 	if (skb->len <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
 			spin_unlock(&police->tcf_lock);
@@ -591,10 +577,8 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
 	if (police->tcfp_result)
 		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
 			&police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate)
 		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
 	return skb->len;
 
 rtattr_failure:
@@ -612,9 +596,7 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
 		goto errout;
 
 	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
 		goto errout;
 
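
Worth noting for act_police.c: because NET_CLS_POLICE and NET_CLS_ACT both selected NET_ESTIMATOR (see the Kconfig hunks), the average-rate mode was in practice always compiled in wherever the policer existed, so these hunks drop conditionals that were effectively always true. The fast-path check they unguard reads, condensed:

	/* If an average rate (TCA_POLICE_AVRATE) is configured and the
	 * estimator's current bits-per-second estimate already exceeds it,
	 * count an overlimit and return the configured action immediately;
	 * otherwise fall through to the token-bucket check on tcfp_mtu. */
	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}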
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bec600af03ca..0f9e1c71746a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -515,7 +515,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	sch->handle = handle;
 
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1]) {
 			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 						sch->stats_lock,
@@ -531,7 +530,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 				goto err_out3;
 			}
 		}
-#endif
 		qdisc_lock_tree(dev);
 		list_add_tail(&sch->list, &dev->qdisc_list);
 		qdisc_unlock_tree(dev);
@@ -559,11 +557,9 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
 		if (err)
 			return err;
 	}
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
 				      sch->stats_lock, tca[TCA_RATE-1]);
-#endif
 	return 0;
 }
 
@@ -839,9 +835,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 		goto rtattr_failure;
 
 	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
 		goto rtattr_failure;
 
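
For qdiscs the relevant attribute is TCA_RATE, and here the change is visible to users: a NET_SCHED kernel built without NET_ESTIMATOR used to compile this handling away and silently ignore TCA_RATE, whereas it now always takes effect. The two entry points shown in the sch_api.c hunks, condensed (error unwinding via err_out3 as in the source):

	/* qdisc_create(): register an estimator if TCA_RATE was supplied;
	 * a registration failure tears the new qdisc back down. */
	if (tca[TCA_RATE-1]) {
		err = gen_new_estimator(&sch->bstats, &sch->rate_est,
					sch->stats_lock, tca[TCA_RATE-1]);
		if (err)
			goto err_out3;
	}

	/* qdisc_change(): replace any existing estimator with the new
	 * parameters supplied by userspace. */
	if (tca[TCA_RATE-1])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      sch->stats_lock, tca[TCA_RATE-1]);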
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ee2d5967d109..bf1ea9e75cd9 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1653,9 +1653,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	cl->xstats.undertime = cl->undertime - q->now;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
 		return -1;
 
@@ -1726,9 +1724,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	tcf_destroy_chain(cl->filter_list);
 	qdisc_destroy(cl->q);
 	qdisc_put_rtab(cl->R_tab);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
 	if (cl != &q->link)
 		kfree(cl);
 }
@@ -1873,11 +1869,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 
 		sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
 					      cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 		return 0;
 	}
 
@@ -1963,11 +1957,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
 	sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
 				  cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2488dbb17b60..e525fd723c12 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -514,9 +514,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		return;
 
 	list_del(&qdisc->list);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-#endif
 	if (ops->reset)
 		ops->reset(qdisc);
 	if (ops->destroy)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9d124c4ee3a7..7ccdf63a0cb5 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1054,11 +1054,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		}
 		sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
 					      cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 		return 0;
 	}
 
@@ -1112,11 +1110,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->cl_pcvtoff = parent->cl_cvtoff;
 	sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
 				  cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 	*arg = (unsigned long)cl;
 	return 0;
 }
@@ -1128,9 +1124,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 
 	tcf_destroy_chain(cl->filter_list);
 	qdisc_destroy(cl->qdisc);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
 	if (cl != &q->root)
 		kfree(cl);
 }
@@ -1384,9 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	xstats.rtwork = cl->cl_cumul;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
 		return -1;
 