author	Patrick McHardy <kaber@trash.net>	2007-07-15 03:03:05 -0400
committer	David S. Miller <davem@davemloft.net>	2007-07-15 03:03:05 -0400
commit	c3bc7cff8fddb6ff9715be8bfc3d911378c4d69d
tree	e23946fb3583ee17e95c07f6e04b5dcc498fa581 /net
parent	73ca4918fbb98311421259d82ef4ab44feeace43
[NET_SCHED]: Kill CONFIG_NET_CLS_POLICE
The NET_CLS_ACT option is now a full replacement for NET_CLS_POLICE,
so remove the old code. The config option will be kept around for a
short time to select the equivalent NET_CLS_ACT options, allowing
easier upgrades.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
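For anyone carrying an old configuration across this change, the effect of the compatibility option can be read straight from the Kconfig hunk below: the old symbol no longer builds its own policer, it only selects the action framework and the action-based policer. Roughly, in .config terms (an illustrative sketch, not part of the commit itself):

	# before: NET_CLS_POLICE built the classifier-attached policer directly
	CONFIG_NET_CLS_POLICE=y

	# after: the same symbol is only a compatibility shim and pulls in the replacements
	CONFIG_NET_CLS_POLICE=y
	CONFIG_NET_CLS_ACT=y		# selected by NET_CLS_POLICE
	CONFIG_NET_ACT_POLICE=y		# selected by NET_CLS_POLICE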
Diffstat (limited to 'net')
-rw-r--r--  net/sched/Kconfig        |   8
-rw-r--r--  net/sched/Makefile       |   1
-rw-r--r--  net/sched/act_police.c   | 246
-rw-r--r--  net/sched/cls_api.c      |  40
-rw-r--r--  net/sched/cls_u32.c      |   3
-rw-r--r--  net/sched/sch_atm.c      |  19
-rw-r--r--  net/sched/sch_cbq.c      |  45
-rw-r--r--  net/sched/sch_dsmark.c   |  13
-rw-r--r--  net/sched/sch_hfsc.c     |   3
-rw-r--r--  net/sched/sch_htb.c      |   3
-rw-r--r--  net/sched/sch_ingress.c  |  19
-rw-r--r--  net/sched/sch_tbf.c      |   2
12 files changed, 37 insertions, 365 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b4662888bdbd..d3f7c3f9407a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -472,12 +472,12 @@ config NET_ACT_SIMP
 
 config NET_CLS_POLICE
 	bool "Traffic Policing (obsolete)"
-	depends on NET_CLS_ACT!=y
+	select NET_CLS_ACT
+	select NET_ACT_POLICE
 	---help---
 	  Say Y here if you want to do traffic policing, i.e. strict
-	  bandwidth limiting. This option is obsoleted by the traffic
-	  policer implemented as action, it stays here for compatibility
-	  reasons.
+	  bandwidth limiting. This option is obsolete and just selects
+	  the option replacing it. It will be removed in the future.
 
 config NET_CLS_IND
 	bool "Incoming device classification"
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 020767a204d4..b67c36f65cf2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NET_SCHED)	+= sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= act_police.o
-obj-$(CONFIG_NET_CLS_POLICE)	+= act_police.o
 obj-$(CONFIG_NET_ACT_GACT)	+= act_gact.o
 obj-$(CONFIG_NET_ACT_MIRRED)	+= act_mirred.o
 obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index d20403890877..bf90e60f8411 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -50,7 +50,6 @@ struct tc_police_compat
 
 /* Each policer is serialized by its individual spinlock */
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
 				 int type, struct tc_action *a)
 {
@@ -96,9 +95,8 @@ rtattr_failure:
 	nlmsg_trim(skb, r);
 	goto done;
 }
-#endif
 
-void tcf_police_destroy(struct tcf_police *p)
+static void tcf_police_destroy(struct tcf_police *p)
 {
 	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
 	struct tcf_common **p1p;
@@ -121,7 +119,6 @@ void tcf_police_destroy(struct tcf_police *p)
 	BUG_TRAP(0);
 }
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 				 struct tc_action *a, int ovr, int bind)
 {
@@ -247,10 +244,19 @@ failure:
 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_police *p = a->priv;
+	int ret = 0;
 
-	if (p != NULL)
-		return tcf_police_release(p, bind);
-	return 0;
+	if (p != NULL) {
+		if (bind)
+			p->tcf_bindcnt--;
+
+		p->tcf_refcnt--;
+		if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
+			tcf_police_destroy(p);
+			ret = 1;
+		}
+	}
+	return ret;
 }
 
 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
@@ -372,229 +378,3 @@ police_cleanup_module(void)
 
 module_init(police_init_module);
 module_exit(police_cleanup_module);
-
-#else /* CONFIG_NET_CLS_ACT */
-
-static struct tcf_common *tcf_police_lookup(u32 index)
-{
-	struct tcf_hashinfo *hinfo = &police_hash_info;
-	struct tcf_common *p;
-
-	read_lock(hinfo->lock);
-	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
-	     p = p->tcfc_next) {
-		if (p->tcfc_index == index)
-			break;
-	}
-	read_unlock(hinfo->lock);
-
-	return p;
-}
-
-static u32 tcf_police_new_index(void)
-{
-	u32 *idx_gen = &police_idx_gen;
-	u32 val = *idx_gen;
-
-	do {
-		if (++val == 0)
-			val = 1;
-	} while (tcf_police_lookup(val));
-
-	return (*idx_gen = val);
-}
-
-struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
-{
-	unsigned int h;
-	struct tcf_police *police;
-	struct rtattr *tb[TCA_POLICE_MAX];
-	struct tc_police *parm;
-	int size;
-
-	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
-		return NULL;
-
-	if (tb[TCA_POLICE_TBF-1] == NULL)
-		return NULL;
-	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
-	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
-		return NULL;
-
-	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
-
-	if (parm->index) {
-		struct tcf_common *pc;
-
-		pc = tcf_police_lookup(parm->index);
-		if (pc) {
-			police = to_police(pc);
-			police->tcf_refcnt++;
-			return police;
-		}
-	}
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (unlikely(!police))
-		return NULL;
-
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (parm->rate.rate) {
-		police->tcfp_R_tab =
-			qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
-		if (police->tcfp_R_tab == NULL)
-			goto failure;
-		if (parm->peakrate.rate) {
-			police->tcfp_P_tab =
-				qdisc_get_rtab(&parm->peakrate,
-					       tb[TCA_POLICE_PEAKRATE-1]);
-			if (police->tcfp_P_tab == NULL)
-				goto failure;
-		}
-	}
-	if (tb[TCA_POLICE_RESULT-1]) {
-		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
-			goto failure;
-		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
-	}
-	if (tb[TCA_POLICE_AVRATE-1]) {
-		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
-			goto failure;
-		police->tcfp_ewma_rate =
-			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
-	}
-	police->tcfp_toks = police->tcfp_burst = parm->burst;
-	police->tcfp_mtu = parm->mtu;
-	if (police->tcfp_mtu == 0) {
-		police->tcfp_mtu = ~0;
-		if (police->tcfp_R_tab)
-			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
-	}
-	if (police->tcfp_P_tab)
-		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
-	police->tcfp_t_c = psched_get_time();
-	police->tcf_index = parm->index ? parm->index :
-		tcf_police_new_index();
-	police->tcf_action = parm->action;
-	if (est)
-		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
-				  &police->tcf_lock, est);
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	write_lock_bh(&police_lock);
-	police->tcf_next = tcf_police_ht[h];
-	tcf_police_ht[h] = &police->common;
-	write_unlock_bh(&police_lock);
-	return police;
-
-failure:
-	if (police->tcfp_R_tab)
-		qdisc_put_rtab(police->tcfp_R_tab);
-	kfree(police);
-	return NULL;
-}
-
-int tcf_police(struct sk_buff *skb, struct tcf_police *police)
-{
-	psched_time_t now;
-	long toks;
-	long ptoks = 0;
-
-	spin_lock(&police->tcf_lock);
-
-	police->tcf_bstats.bytes += skb->len;
-	police->tcf_bstats.packets++;
-
-	if (police->tcfp_ewma_rate &&
-	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
-		police->tcf_qstats.overlimits++;
-		spin_unlock(&police->tcf_lock);
-		return police->tcf_action;
-	}
-	if (skb->len <= police->tcfp_mtu) {
-		if (police->tcfp_R_tab == NULL) {
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
-		}
-
-		now = psched_get_time();
-		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
-					    police->tcfp_burst);
-		if (police->tcfp_P_tab) {
-			ptoks = toks + police->tcfp_ptoks;
-			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
-				ptoks = (long)L2T_P(police, police->tcfp_mtu);
-			ptoks -= L2T_P(police, skb->len);
-		}
-		toks += police->tcfp_toks;
-		if (toks > (long)police->tcfp_burst)
-			toks = police->tcfp_burst;
-		toks -= L2T(police, skb->len);
-		if ((toks|ptoks) >= 0) {
-			police->tcfp_t_c = now;
-			police->tcfp_toks = toks;
-			police->tcfp_ptoks = ptoks;
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
-		}
-	}
-
-	police->tcf_qstats.overlimits++;
-	spin_unlock(&police->tcf_lock);
-	return police->tcf_action;
-}
-EXPORT_SYMBOL(tcf_police);
-
-int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
-{
-	unsigned char *b = skb_tail_pointer(skb);
-	struct tc_police opt;
-
-	opt.index = police->tcf_index;
-	opt.action = police->tcf_action;
-	opt.mtu = police->tcfp_mtu;
-	opt.burst = police->tcfp_burst;
-	if (police->tcfp_R_tab)
-		opt.rate = police->tcfp_R_tab->rate;
-	else
-		memset(&opt.rate, 0, sizeof(opt.rate));
-	if (police->tcfp_P_tab)
-		opt.peakrate = police->tcfp_P_tab->rate;
-	else
-		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
-	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (police->tcfp_result)
-		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
-			&police->tcfp_result);
-	if (police->tcfp_ewma_rate)
-		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-	return skb->len;
-
-rtattr_failure:
-	nlmsg_trim(skb, b);
-	return -1;
-}
-
-int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
-{
-	struct gnet_dump d;
-
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, &police->tcf_lock,
-					 &d) < 0)
-		goto errout;
-
-	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
-		goto errout;
-
-	if (gnet_stats_finish_copy(&d) < 0)
-		goto errout;
-
-	return 0;
-
-errout:
-	return -1;
-}
-
-#endif /* CONFIG_NET_CLS_ACT */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 36b72aab1bde..5f0fbca7393f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -458,11 +458,6 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
 		exts->action = NULL;
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police) {
-		tcf_police_release(exts->police, TCA_ACT_UNBIND);
-		exts->police = NULL;
-	}
 #endif
 }
 
@@ -496,17 +491,6 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
 			exts->action = act;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && tb[map->police-1]) {
-		struct tcf_police *p;
-
-		p = tcf_police_locate(tb[map->police-1], rate_tlv);
-		if (p == NULL)
-			return -EINVAL;
-
-		exts->police = p;
-	} else if (map->action && tb[map->action-1])
-		return -EOPNOTSUPP;
 #else
 	if ((map->action && tb[map->action-1]) ||
 	    (map->police && tb[map->police-1]))
@@ -529,15 +513,6 @@ tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		if (act)
 			tcf_action_destroy(act, TCA_ACT_UNBIND);
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (src->police) {
-		struct tcf_police *p;
-		tcf_tree_lock(tp);
-		p = xchg(&dst->police, src->police);
-		tcf_tree_unlock(tp);
-		if (p)
-			tcf_police_release(p, TCA_ACT_UNBIND);
-	}
 #endif
 }
 
@@ -566,17 +541,6 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
 			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && exts->police) {
-		struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
-
-		RTA_PUT(skb, map->police, 0, NULL);
-
-		if (tcf_police_dump(skb, exts->police) < 0)
-			goto rtattr_failure;
-
-		p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
-	}
 #endif
 	return 0;
 rtattr_failure: __attribute__ ((unused))
@@ -591,10 +555,6 @@ tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
 	if (exts->action)
 		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
 			goto rtattr_failure;
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police)
-		if (tcf_police_dump_stats(skb, exts->police) < 0)
-			goto rtattr_failure;
 #endif
 	return 0;
 rtattr_failure: __attribute__ ((unused))
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 77961e2314dc..8dbe36912ecb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -782,9 +782,6 @@ static int __init init_u32(void)
 #ifdef CONFIG_CLS_U32_PERF
 	printk(" Performance counters on\n");
 #endif
-#ifdef CONFIG_NET_CLS_POLICE
-	printk(" OLD policer on \n");
-#endif
 #ifdef CONFIG_NET_CLS_IND
 	printk(" input device check on \n");
 #endif
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 37ae6d1deb14..417ec8fb7f1a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -428,26 +428,9 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
 			break;
 		}
-#elif defined(CONFIG_NET_CLS_POLICE)
-		switch (result) {
-		case TC_POLICE_SHOT:
-			kfree_skb(skb);
-			goto drop;
-		case TC_POLICE_RECLASSIFY:
-			if (flow->excess)
-				flow = flow->excess;
-			else {
-				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
-				break;
-			}
-			/* fall through */
-		case TC_POLICE_OK:
-			/* fall through */
-		default:
-			break;
-		}
 #endif
 	}
+
 	if ((ret = flow->q->enqueue(skb, flow->q)) != 0) {
 drop: __maybe_unused
 		sch->qstats.drops++;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 77381f1c6541..e38c2839b25c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -82,7 +82,7 @@ struct cbq_class
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
 	unsigned char		ovl_strategy;
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	unsigned char		police;
 #endif
 
@@ -154,7 +154,7 @@ struct cbq_sched_data
 	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
 								   with backlog */
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	struct cbq_class	*rx_class;
 #endif
 	struct cbq_class	*tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 	return NULL;
 }
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -271,15 +271,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		case TC_ACT_RECLASSIFY:
 			return cbq_reclassify(skb, cl);
 		}
-#elif defined(CONFIG_NET_CLS_POLICE)
-		switch (result) {
-		case TC_POLICE_RECLASSIFY:
-			return cbq_reclassify(skb, cl);
-		case TC_POLICE_SHOT:
-			return NULL;
-		default:
-			break;
-		}
 #endif
 		if (cl->level == 0)
 			return cl;
@@ -392,7 +383,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
@@ -402,7 +393,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	cl->q->__parent = sch;
 #endif
 	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -437,7 +428,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cbq_mark_toplevel(q, cl);
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 #endif
@@ -672,9 +663,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
-
+#ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
 	int len = skb->len;
@@ -1367,7 +1356,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	return 0;
 }
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
 {
 	cl->police = p->police;
@@ -1535,7 +1524,7 @@ rtattr_failure:
 	return -1;
 }
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
@@ -1561,7 +1550,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 	    cbq_dump_rate(skb, cl) < 0 ||
 	    cbq_dump_wrr(skb, cl) < 0 ||
 	    cbq_dump_ovl(skb, cl) < 0 ||
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	    cbq_dump_police(skb, cl) < 0 ||
 #endif
 	    cbq_dump_fopt(skb, cl) < 0)
@@ -1656,7 +1645,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 					     cl->classid)) == NULL)
 			return -ENOBUFS;
 	} else {
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 		if (cl->police == TC_POLICE_RECLASSIFY)
 			new->reshape_fail = cbq_reshape_fail;
 #endif
@@ -1721,7 +1710,7 @@ cbq_destroy(struct Qdisc* sch)
 	struct cbq_class *cl;
 	unsigned h;
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = NULL;
 #endif
 	/*
@@ -1750,7 +1739,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 	struct cbq_class *cl = (struct cbq_class*)arg;
 
 	if (--cl->refcnt == 0) {
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(&sch->dev->queue_lock);
@@ -1798,7 +1787,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
 		return -EINVAL;
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	if (tb[TCA_CBQ_POLICE-1] &&
 	    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
 		return -EINVAL;
@@ -1841,7 +1830,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		if (tb[TCA_CBQ_OVL_STRATEGY-1])
 			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
 
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 		if (tb[TCA_CBQ_POLICE-1])
 			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1934,7 +1923,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY-1])
 		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	if (tb[TCA_CBQ_POLICE-1])
 		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1978,7 +1967,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 		q->tx_class = NULL;
 		q->tx_borrowed = NULL;
 	}
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
 #endif
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2d7e891e6b0d..60f89199e3da 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -246,21 +246,10 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 		kfree_skb(skb);
 		sch->qstats.drops++;
 		return NET_XMIT_BYPASS;
-#elif defined(CONFIG_NET_CLS_POLICE)
-	case TC_POLICE_SHOT:
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_POLICED;
-#if 0
-	case TC_POLICE_RECLASSIFY:
-		/* FIXME: what to do here ??? */
-#endif
 #endif
-	case TC_POLICE_OK:
+	case TC_ACT_OK:
 		skb->tc_index = TC_H_MIN(res.classid);
 		break;
-	case TC_POLICE_UNSPEC:
-		/* fall through */
 	default:
 		if (p->default_index != NO_DEFAULT_INDEX)
 			skb->tc_index = p->default_index;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 874452c41a01..55e7e4530f43 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1174,9 +1174,6 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		case TC_ACT_SHOT:
 			return NULL;
 		}
-#elif defined(CONFIG_NET_CLS_POLICE)
-	if (result == TC_POLICE_SHOT)
-		return NULL;
 #endif
 	if ((cl = (struct hfsc_class *)res.class) == NULL) {
 		if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index b417a95df322..246a2f9765f1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -249,9 +249,6 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 		case TC_ACT_SHOT:
 			return NULL;
 		}
-#elif defined(CONFIG_NET_CLS_POLICE)
-	if (result == TC_POLICE_SHOT)
-		return HTB_DIRECT;
 #endif
 	if ((cl = (void *)res.class) == NULL) {
 		if (res.classid == sch->handle)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index cd0aab6a2a7c..51f16b0af198 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -164,31 +164,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 			result = TC_ACT_OK;
 			break;
 	}
-/* backward compat */
-#else
-#ifdef CONFIG_NET_CLS_POLICE
-	switch (result) {
-	case TC_POLICE_SHOT:
-		result = NF_DROP;
-		sch->qstats.drops++;
-		break;
-	case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
-	case TC_POLICE_OK:
-	case TC_POLICE_UNSPEC:
-	default:
-		sch->bstats.packets++;
-		sch->bstats.bytes += skb->len;
-		result = NF_ACCEPT;
-		break;
-	}
-
 #else
 	D2PRINTK("Overriding result to ACCEPT\n");
 	result = NF_ACCEPT;
 	sch->bstats.packets++;
 	sch->bstats.bytes += skb->len;
 #endif
-#endif
 
 	return result;
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8b3345cede8..8c2639af4c6a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (skb->len > q->max_size) {
 		sch->qstats.drops++;
-#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
+#ifdef CONFIG_NET_CLS_ACT
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
 			kfree_skb(skb);