aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/act_police.c
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-07-03 01:46:07 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-07-11 01:16:37 -0400
commit876d48aabf30e4981653f1a0a7ae1e262b8c8b6f (patch)
tree49dace46f70bc243605ecf73af4a3f06e607a2be /net/sched/act_police.c
parenta553e4a6317b2cfc7659542c10fe43184ffe53da (diff)
[NET_SCHED]: Remove CONFIG_NET_ESTIMATOR option
The generic estimator is always built in anyway, and all the config option does is prevent including a minimal amount of code for setting it up. Additionally, the option is already automatically selected for most cases. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/act_police.c')
-rw-r--r--net/sched/act_police.c18
1 file changed, 0 insertions, 18 deletions
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 616f465f407e..580698db578a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,10 +118,8 @@ void tcf_police_destroy(struct tcf_police *p)
118 write_lock_bh(&police_lock); 118 write_lock_bh(&police_lock);
119 *p1p = p->tcf_next; 119 *p1p = p->tcf_next;
120 write_unlock_bh(&police_lock); 120 write_unlock_bh(&police_lock);
121#ifdef CONFIG_NET_ESTIMATOR
122 gen_kill_estimator(&p->tcf_bstats, 121 gen_kill_estimator(&p->tcf_bstats,
123 &p->tcf_rate_est); 122 &p->tcf_rate_est);
124#endif
125 if (p->tcfp_R_tab) 123 if (p->tcfp_R_tab)
126 qdisc_put_rtab(p->tcfp_R_tab); 124 qdisc_put_rtab(p->tcfp_R_tab);
127 if (p->tcfp_P_tab) 125 if (p->tcfp_P_tab)
@@ -227,7 +225,6 @@ override:
227 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); 225 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
228 police->tcf_action = parm->action; 226 police->tcf_action = parm->action;
229 227
230#ifdef CONFIG_NET_ESTIMATOR
231 if (tb[TCA_POLICE_AVRATE-1]) 228 if (tb[TCA_POLICE_AVRATE-1])
232 police->tcfp_ewma_rate = 229 police->tcfp_ewma_rate =
233 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 230 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
@@ -235,7 +232,6 @@ override:
235 gen_replace_estimator(&police->tcf_bstats, 232 gen_replace_estimator(&police->tcf_bstats,
236 &police->tcf_rate_est, 233 &police->tcf_rate_est,
237 police->tcf_stats_lock, est); 234 police->tcf_stats_lock, est);
238#endif
239 235
240 spin_unlock_bh(&police->tcf_lock); 236 spin_unlock_bh(&police->tcf_lock);
241 if (ret != ACT_P_CREATED) 237 if (ret != ACT_P_CREATED)
@@ -281,14 +277,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
281 police->tcf_bstats.bytes += skb->len; 277 police->tcf_bstats.bytes += skb->len;
282 police->tcf_bstats.packets++; 278 police->tcf_bstats.packets++;
283 279
284#ifdef CONFIG_NET_ESTIMATOR
285 if (police->tcfp_ewma_rate && 280 if (police->tcfp_ewma_rate &&
286 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 281 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
287 police->tcf_qstats.overlimits++; 282 police->tcf_qstats.overlimits++;
288 spin_unlock(&police->tcf_lock); 283 spin_unlock(&police->tcf_lock);
289 return police->tcf_action; 284 return police->tcf_action;
290 } 285 }
291#endif
292 286
293 if (skb->len <= police->tcfp_mtu) { 287 if (skb->len <= police->tcfp_mtu) {
294 if (police->tcfp_R_tab == NULL) { 288 if (police->tcfp_R_tab == NULL) {
@@ -348,10 +342,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
348 if (police->tcfp_result) 342 if (police->tcfp_result)
349 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 343 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
350 &police->tcfp_result); 344 &police->tcfp_result);
351#ifdef CONFIG_NET_ESTIMATOR
352 if (police->tcfp_ewma_rate) 345 if (police->tcfp_ewma_rate)
353 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 346 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
354#endif
355 return skb->len; 347 return skb->len;
356 348
357rtattr_failure: 349rtattr_failure:
@@ -477,14 +469,12 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
477 goto failure; 469 goto failure;
478 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 470 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
479 } 471 }
480#ifdef CONFIG_NET_ESTIMATOR
481 if (tb[TCA_POLICE_AVRATE-1]) { 472 if (tb[TCA_POLICE_AVRATE-1]) {
482 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32)) 473 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
483 goto failure; 474 goto failure;
484 police->tcfp_ewma_rate = 475 police->tcfp_ewma_rate =
485 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 476 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
486 } 477 }
487#endif
488 police->tcfp_toks = police->tcfp_burst = parm->burst; 478 police->tcfp_toks = police->tcfp_burst = parm->burst;
489 police->tcfp_mtu = parm->mtu; 479 police->tcfp_mtu = parm->mtu;
490 if (police->tcfp_mtu == 0) { 480 if (police->tcfp_mtu == 0) {
@@ -498,11 +488,9 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
498 police->tcf_index = parm->index ? parm->index : 488 police->tcf_index = parm->index ? parm->index :
499 tcf_police_new_index(); 489 tcf_police_new_index();
500 police->tcf_action = parm->action; 490 police->tcf_action = parm->action;
501#ifdef CONFIG_NET_ESTIMATOR
502 if (est) 491 if (est)
503 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est, 492 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
504 police->tcf_stats_lock, est); 493 police->tcf_stats_lock, est);
505#endif
506 h = tcf_hash(police->tcf_index, POL_TAB_MASK); 494 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
507 write_lock_bh(&police_lock); 495 write_lock_bh(&police_lock);
508 police->tcf_next = tcf_police_ht[h]; 496 police->tcf_next = tcf_police_ht[h];
@@ -528,14 +516,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
528 police->tcf_bstats.bytes += skb->len; 516 police->tcf_bstats.bytes += skb->len;
529 police->tcf_bstats.packets++; 517 police->tcf_bstats.packets++;
530 518
531#ifdef CONFIG_NET_ESTIMATOR
532 if (police->tcfp_ewma_rate && 519 if (police->tcfp_ewma_rate &&
533 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 520 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
534 police->tcf_qstats.overlimits++; 521 police->tcf_qstats.overlimits++;
535 spin_unlock(&police->tcf_lock); 522 spin_unlock(&police->tcf_lock);
536 return police->tcf_action; 523 return police->tcf_action;
537 } 524 }
538#endif
539 if (skb->len <= police->tcfp_mtu) { 525 if (skb->len <= police->tcfp_mtu) {
540 if (police->tcfp_R_tab == NULL) { 526 if (police->tcfp_R_tab == NULL) {
541 spin_unlock(&police->tcf_lock); 527 spin_unlock(&police->tcf_lock);
@@ -591,10 +577,8 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
591 if (police->tcfp_result) 577 if (police->tcfp_result)
592 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 578 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
593 &police->tcfp_result); 579 &police->tcfp_result);
594#ifdef CONFIG_NET_ESTIMATOR
595 if (police->tcfp_ewma_rate) 580 if (police->tcfp_ewma_rate)
596 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 581 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
597#endif
598 return skb->len; 582 return skb->len;
599 583
600rtattr_failure: 584rtattr_failure:
@@ -612,9 +596,7 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
612 goto errout; 596 goto errout;
613 597
614 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 || 598 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
615#ifdef CONFIG_NET_ESTIMATOR
616 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 || 599 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
617#endif
618 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0) 600 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
619 goto errout; 601 goto errout;
620 602