Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/Kconfig         8
-rw-r--r--   net/sched/Makefile        1
-rw-r--r--   net/sched/act_police.c  246
-rw-r--r--   net/sched/cls_api.c      40
-rw-r--r--   net/sched/cls_u32.c       3
-rw-r--r--   net/sched/sch_api.c      73
-rw-r--r--   net/sched/sch_atm.c     475
-rw-r--r--   net/sched/sch_cbq.c      48
-rw-r--r--   net/sched/sch_dsmark.c   34
-rw-r--r--   net/sched/sch_hfsc.c      3
-rw-r--r--   net/sched/sch_htb.c       3
-rw-r--r--   net/sched/sch_ingress.c  19
-rw-r--r--   net/sched/sch_tbf.c       2
13 files changed, 327 insertions, 628 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b4662888bdbd..d3f7c3f9407a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -472,12 +472,12 @@ config NET_ACT_SIMP
 
 config NET_CLS_POLICE
 	bool "Traffic Policing (obsolete)"
-	depends on NET_CLS_ACT!=y
+	select NET_CLS_ACT
+	select NET_ACT_POLICE
 	---help---
 	  Say Y here if you want to do traffic policing, i.e. strict
-	  bandwidth limiting. This option is obsoleted by the traffic
-	  policer implemented as action, it stays here for compatibility
-	  reasons.
+	  bandwidth limiting. This option is obsolete and just selects
+	  the option replacing it. It will be removed in the future.
 
 config NET_CLS_IND
 	bool "Incoming device classification"
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 020767a204d4..b67c36f65cf2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= act_police.o
-obj-$(CONFIG_NET_CLS_POLICE)	+= act_police.o
 obj-$(CONFIG_NET_ACT_GACT)	+= act_gact.o
 obj-$(CONFIG_NET_ACT_MIRRED)	+= act_mirred.o
 obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index d20403890877..bf90e60f8411 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -50,7 +50,6 @@ struct tc_police_compat
 
 /* Each policer is serialized by its individual spinlock */
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
 				 int type, struct tc_action *a)
 {
@@ -96,9 +95,8 @@ rtattr_failure:
 	nlmsg_trim(skb, r);
 	goto done;
 }
-#endif
 
-void tcf_police_destroy(struct tcf_police *p)
+static void tcf_police_destroy(struct tcf_police *p)
 {
 	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
 	struct tcf_common **p1p;
@@ -121,7 +119,6 @@ void tcf_police_destroy(struct tcf_police *p)
 	BUG_TRAP(0);
 }
 
-#ifdef CONFIG_NET_CLS_ACT
 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
 				 struct tc_action *a, int ovr, int bind)
 {
@@ -247,10 +244,19 @@ failure:
 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_police *p = a->priv;
+	int ret = 0;
 
-	if (p != NULL)
-		return tcf_police_release(p, bind);
-	return 0;
+	if (p != NULL) {
+		if (bind)
+			p->tcf_bindcnt--;
+
+		p->tcf_refcnt--;
+		if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
+			tcf_police_destroy(p);
+			ret = 1;
+		}
+	}
+	return ret;
 }
 
 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
@@ -372,229 +378,3 @@ police_cleanup_module(void)
 
 module_init(police_init_module);
 module_exit(police_cleanup_module);
-
-#else /* CONFIG_NET_CLS_ACT */
-
-static struct tcf_common *tcf_police_lookup(u32 index)
-{
-	struct tcf_hashinfo *hinfo = &police_hash_info;
-	struct tcf_common *p;
-
-	read_lock(hinfo->lock);
-	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
-	     p = p->tcfc_next) {
-		if (p->tcfc_index == index)
-			break;
-	}
-	read_unlock(hinfo->lock);
-
-	return p;
-}
-
-static u32 tcf_police_new_index(void)
-{
-	u32 *idx_gen = &police_idx_gen;
-	u32 val = *idx_gen;
-
-	do {
-		if (++val == 0)
-			val = 1;
-	} while (tcf_police_lookup(val));
-
-	return (*idx_gen = val);
-}
-
-struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
-{
-	unsigned int h;
-	struct tcf_police *police;
-	struct rtattr *tb[TCA_POLICE_MAX];
-	struct tc_police *parm;
-	int size;
-
-	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
-		return NULL;
-
-	if (tb[TCA_POLICE_TBF-1] == NULL)
-		return NULL;
-	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
-	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
-		return NULL;
-
-	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
-
-	if (parm->index) {
-		struct tcf_common *pc;
-
-		pc = tcf_police_lookup(parm->index);
-		if (pc) {
-			police = to_police(pc);
-			police->tcf_refcnt++;
-			return police;
-		}
-	}
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (unlikely(!police))
-		return NULL;
-
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (parm->rate.rate) {
-		police->tcfp_R_tab =
-			qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
-		if (police->tcfp_R_tab == NULL)
-			goto failure;
-		if (parm->peakrate.rate) {
-			police->tcfp_P_tab =
-				qdisc_get_rtab(&parm->peakrate,
-					       tb[TCA_POLICE_PEAKRATE-1]);
-			if (police->tcfp_P_tab == NULL)
-				goto failure;
-		}
-	}
-	if (tb[TCA_POLICE_RESULT-1]) {
-		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
-			goto failure;
-		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
-	}
-	if (tb[TCA_POLICE_AVRATE-1]) {
-		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
-			goto failure;
-		police->tcfp_ewma_rate =
-			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
-	}
-	police->tcfp_toks = police->tcfp_burst = parm->burst;
-	police->tcfp_mtu = parm->mtu;
-	if (police->tcfp_mtu == 0) {
-		police->tcfp_mtu = ~0;
-		if (police->tcfp_R_tab)
-			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
-	}
-	if (police->tcfp_P_tab)
-		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
-	police->tcfp_t_c = psched_get_time();
-	police->tcf_index = parm->index ? parm->index :
-		tcf_police_new_index();
-	police->tcf_action = parm->action;
-	if (est)
-		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
-				  &police->tcf_lock, est);
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	write_lock_bh(&police_lock);
-	police->tcf_next = tcf_police_ht[h];
-	tcf_police_ht[h] = &police->common;
-	write_unlock_bh(&police_lock);
-	return police;
-
-failure:
-	if (police->tcfp_R_tab)
-		qdisc_put_rtab(police->tcfp_R_tab);
-	kfree(police);
-	return NULL;
-}
-
-int tcf_police(struct sk_buff *skb, struct tcf_police *police)
-{
-	psched_time_t now;
-	long toks;
-	long ptoks = 0;
-
-	spin_lock(&police->tcf_lock);
-
-	police->tcf_bstats.bytes += skb->len;
-	police->tcf_bstats.packets++;
-
-	if (police->tcfp_ewma_rate &&
-	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
-		police->tcf_qstats.overlimits++;
-		spin_unlock(&police->tcf_lock);
-		return police->tcf_action;
-	}
-	if (skb->len <= police->tcfp_mtu) {
-		if (police->tcfp_R_tab == NULL) {
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
-		}
-
-		now = psched_get_time();
-		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
-					    police->tcfp_burst);
-		if (police->tcfp_P_tab) {
-			ptoks = toks + police->tcfp_ptoks;
-			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
-				ptoks = (long)L2T_P(police, police->tcfp_mtu);
-			ptoks -= L2T_P(police, skb->len);
-		}
-		toks += police->tcfp_toks;
-		if (toks > (long)police->tcfp_burst)
-			toks = police->tcfp_burst;
-		toks -= L2T(police, skb->len);
-		if ((toks|ptoks) >= 0) {
-			police->tcfp_t_c = now;
-			police->tcfp_toks = toks;
-			police->tcfp_ptoks = ptoks;
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
-		}
-	}
-
-	police->tcf_qstats.overlimits++;
-	spin_unlock(&police->tcf_lock);
-	return police->tcf_action;
-}
-EXPORT_SYMBOL(tcf_police);
-
-int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
-{
-	unsigned char *b = skb_tail_pointer(skb);
-	struct tc_police opt;
-
-	opt.index = police->tcf_index;
-	opt.action = police->tcf_action;
-	opt.mtu = police->tcfp_mtu;
-	opt.burst = police->tcfp_burst;
-	if (police->tcfp_R_tab)
-		opt.rate = police->tcfp_R_tab->rate;
-	else
-		memset(&opt.rate, 0, sizeof(opt.rate));
-	if (police->tcfp_P_tab)
-		opt.peakrate = police->tcfp_P_tab->rate;
-	else
-		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
-	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (police->tcfp_result)
-		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
-			&police->tcfp_result);
-	if (police->tcfp_ewma_rate)
-		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-	return skb->len;
-
-rtattr_failure:
-	nlmsg_trim(skb, b);
-	return -1;
-}
-
-int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
-{
-	struct gnet_dump d;
-
-	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, &police->tcf_lock,
-					 &d) < 0)
-		goto errout;
-
-	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
-		goto errout;
-
-	if (gnet_stats_finish_copy(&d) < 0)
-		goto errout;
-
-	return 0;
-
-errout:
-	return -1;
-}
-
-#endif /* CONFIG_NET_CLS_ACT */
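
The tcf_police() routine removed above is a textbook token-bucket check: refill tokens for the time elapsed since the last conforming packet, cap them at the burst size, charge the packet length, and conform only if nothing goes negative. A minimal standalone sketch of that decision, using illustrative names and plain nanosecond arithmetic instead of the kernel's psched and rate-table helpers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative token-bucket policer state (not the kernel's struct tcf_police). */
struct tb_police {
	uint64_t rate_bps;	/* committed rate, bytes per second */
	int64_t  burst;		/* bucket depth, bytes */
	int64_t  toks;		/* current tokens, bytes */
	uint64_t t_c;		/* last conforming update, nanoseconds */
};

/*
 * Mirror of the decision in the removed tcf_police(): refill tokens for the
 * elapsed time (capped at the burst size), charge the packet, and conform
 * only if the bucket does not go negative.
 */
static bool tb_police_conform(struct tb_police *p, uint32_t pkt_len, uint64_t now_ns)
{
	int64_t toks = (int64_t)((now_ns - p->t_c) * p->rate_bps / 1000000000ull);

	toks += p->toks;
	if (toks > p->burst)
		toks = p->burst;	/* the bucket never exceeds the burst */
	toks -= pkt_len;		/* charge this packet */

	if (toks >= 0) {		/* conforming: commit the new state */
		p->t_c = now_ns;
		p->toks = toks;
		return true;
	}
	return false;			/* exceeding: state is left untouched */
}

The kernel version additionally keeps a peak-rate bucket (ptoks) alongside toks, which is why the removed code only conforms when the combined test (toks|ptoks) >= 0 holds.
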
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 36b72aab1bde..5f0fbca7393f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -458,11 +458,6 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
 		exts->action = NULL;
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police) {
-		tcf_police_release(exts->police, TCA_ACT_UNBIND);
-		exts->police = NULL;
-	}
 #endif
 }
 
@@ -496,17 +491,6 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
 			exts->action = act;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && tb[map->police-1]) {
-		struct tcf_police *p;
-
-		p = tcf_police_locate(tb[map->police-1], rate_tlv);
-		if (p == NULL)
-			return -EINVAL;
-
-		exts->police = p;
-	} else if (map->action && tb[map->action-1])
-		return -EOPNOTSUPP;
 #else
 	if ((map->action && tb[map->action-1]) ||
 	    (map->police && tb[map->police-1]))
@@ -529,15 +513,6 @@ tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		if (act)
 			tcf_action_destroy(act, TCA_ACT_UNBIND);
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (src->police) {
-		struct tcf_police *p;
-		tcf_tree_lock(tp);
-		p = xchg(&dst->police, src->police);
-		tcf_tree_unlock(tp);
-		if (p)
-			tcf_police_release(p, TCA_ACT_UNBIND);
-	}
 #endif
 }
 
@@ -566,17 +541,6 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
 			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
 		}
 	}
-#elif defined CONFIG_NET_CLS_POLICE
-	if (map->police && exts->police) {
-		struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
-
-		RTA_PUT(skb, map->police, 0, NULL);
-
-		if (tcf_police_dump(skb, exts->police) < 0)
-			goto rtattr_failure;
-
-		p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
-	}
 #endif
 	return 0;
 rtattr_failure: __attribute__ ((unused))
@@ -591,10 +555,6 @@ tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
 	if (exts->action)
 		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
 			goto rtattr_failure;
-#elif defined CONFIG_NET_CLS_POLICE
-	if (exts->police)
-		if (tcf_police_dump_stats(skb, exts->police) < 0)
-			goto rtattr_failure;
 #endif
 	return 0;
 rtattr_failure: __attribute__ ((unused))
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 77961e2314dc..8dbe36912ecb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -782,9 +782,6 @@ static int __init init_u32(void)
 #ifdef CONFIG_CLS_U32_PERF
 	printk(" Performance counters on\n");
 #endif
-#ifdef CONFIG_NET_CLS_POLICE
-	printk(" OLD policer on \n");
-#endif
 #ifdef CONFIG_NET_CLS_IND
 	printk(" input device check on \n");
 #endif
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d92ea26982c5..13c09bc32aa3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -278,11 +278,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	if (spin_trylock(&dev->queue_lock)) {
-		qdisc_run(dev);
-		spin_unlock(&dev->queue_lock);
-	} else
-		netif_schedule(dev);
+	netif_schedule(dev);
 
 	return HRTIMER_NORESTART;
 }
@@ -1149,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
    to this qdisc, (optionally) tests for protocol and asks
    specific classifiers.
  */
+int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+		       struct tcf_result *res)
+{
+	__be16 protocol = skb->protocol;
+	int err = 0;
+
+	for (; tp; tp = tp->next) {
+		if ((tp->protocol == protocol ||
+		     tp->protocol == htons(ETH_P_ALL)) &&
+		    (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
+				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+#endif
+			return err;
+		}
+	}
+	return -1;
+}
+EXPORT_SYMBOL(tc_classify_compat);
+
 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		struct tcf_result *res)
 {
 	int err = 0;
-	__be16 protocol = skb->protocol;
+	__be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
 #endif
 	protocol = skb->protocol;
 
-	for ( ; tp; tp = tp->next) {
-		if ((tp->protocol == protocol ||
-		     tp->protocol == htons(ETH_P_ALL)) &&
-		    (err = tp->classify(skb, tp, res)) >= 0) {
+	err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
-			if ( TC_ACT_RECLASSIFY == err) {
-				__u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
-				tp = otp;
-
-				if (MAX_REC_LOOP < verd++) {
-					printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
-					       tp->prio&0xffff, ntohs(tp->protocol));
-					return TC_ACT_SHOT;
-				}
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
-				goto reclassify;
-			} else {
-				if (skb->tc_verd)
-					skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-				return err;
-			}
-#else
-
-			return err;
-#endif
+	if (err == TC_ACT_RECLASSIFY) {
+		u32 verd = G_TC_VERD(skb->tc_verd);
+		tp = otp;
+
+		if (verd++ >= MAX_REC_LOOP) {
+			printk("rule prio %u protocol %02x reclassify loop, "
+			       "packet dropped\n",
+			       tp->prio&0xffff, ntohs(tp->protocol));
+			return TC_ACT_SHOT;
 		}
-
+		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
+		goto reclassify;
 	}
-	return -1;
+#endif
+	return err;
 }
+EXPORT_SYMBOL(tc_classify);
 
 void tcf_destroy(struct tcf_proto *tp)
 {
@@ -1256,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
 EXPORT_SYMBOL(unregister_qdisc);
-EXPORT_SYMBOL(tc_classify);
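
The sch_api.c change above splits classification in two: tc_classify_compat() makes a single pass over the filter chain, while tc_classify() wraps it in a reclassification loop bounded by MAX_REC_LOOP so a misconfigured filter cannot spin forever. A rough standalone sketch of that wrapper pattern, with a hypothetical classify_once() standing in for the single-pass call:

#include <stdio.h>

#define MAX_REC_LOOP	4	/* illustrative bound; the kernel defines its own */

enum { ACT_OK = 0, ACT_RECLASSIFY = 1, ACT_SHOT = 2 };

/* Hypothetical single-pass classifier standing in for tc_classify_compat(). */
static int classify_once(int attempt)
{
	return attempt < 2 ? ACT_RECLASSIFY : ACT_OK;
}

/* Bounded reclassify loop in the shape of the reworked tc_classify(). */
static int classify(void)
{
	int verd = 0;
	int err;

reclassify:
	err = classify_once(verd);
	if (err == ACT_RECLASSIFY) {
		if (verd++ >= MAX_REC_LOOP) {
			fprintf(stderr, "reclassify loop, packet dropped\n");
			return ACT_SHOT;
		}
		goto reclassify;
	}
	return err;
}

int main(void)
{
	printf("verdict: %d\n", classify());
	return 0;
}

Qdiscs that need to handle reclassification themselves, such as sch_atm and sch_cbq in this patch, call tc_classify_compat() directly and act on TC_ACT_RECLASSIFY in their own enqueue paths.
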
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 54b92d22796c..417ec8fb7f1a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -2,7 +2,6 @@
2 2
3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/init.h> 6#include <linux/init.h>
8#include <linux/string.h> 7#include <linux/string.h>
@@ -11,12 +10,11 @@
11#include <linux/atmdev.h> 10#include <linux/atmdev.h>
12#include <linux/atmclip.h> 11#include <linux/atmclip.h>
13#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
14#include <linux/file.h> /* for fput */ 13#include <linux/file.h> /* for fput */
15#include <net/netlink.h> 14#include <net/netlink.h>
16#include <net/pkt_sched.h> 15#include <net/pkt_sched.h>
17 16
18 17extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
19extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
20 18
21#if 0 /* control */ 19#if 0 /* control */
22#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) 20#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
@@ -30,7 +28,6 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
30#define D2PRINTK(format,args...) 28#define D2PRINTK(format,args...)
31#endif 29#endif
32 30
33
34/* 31/*
35 * The ATM queuing discipline provides a framework for invoking classifiers 32 * The ATM queuing discipline provides a framework for invoking classifiers
36 * (aka "filters"), which in turn select classes of this queuing discipline. 33 * (aka "filters"), which in turn select classes of this queuing discipline.
@@ -52,16 +49,15 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
52 * - should lock the flow while there is data in the queue (?) 49 * - should lock the flow while there is data in the queue (?)
53 */ 50 */
54 51
55
56#define PRIV(sch) qdisc_priv(sch) 52#define PRIV(sch) qdisc_priv(sch)
57#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back)) 53#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
58 54
59
60struct atm_flow_data { 55struct atm_flow_data {
61 struct Qdisc *q; /* FIFO, TBF, etc. */ 56 struct Qdisc *q; /* FIFO, TBF, etc. */
62 struct tcf_proto *filter_list; 57 struct tcf_proto *filter_list;
63 struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ 58 struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
64 void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* chaining */ 59 void (*old_pop)(struct atm_vcc *vcc,
60 struct sk_buff * skb); /* chaining */
65 struct atm_qdisc_data *parent; /* parent qdisc */ 61 struct atm_qdisc_data *parent; /* parent qdisc */
66 struct socket *sock; /* for closing */ 62 struct socket *sock; /* for closing */
67 u32 classid; /* x:y type ID */ 63 u32 classid; /* x:y type ID */
@@ -82,76 +78,74 @@ struct atm_qdisc_data {
82 struct tasklet_struct task; /* requeue tasklet */ 78 struct tasklet_struct task; /* requeue tasklet */
83}; 79};
84 80
85
86/* ------------------------- Class/flow operations ------------------------- */ 81/* ------------------------- Class/flow operations ------------------------- */
87 82
88 83static int find_flow(struct atm_qdisc_data *qdisc, struct atm_flow_data *flow)
89static int find_flow(struct atm_qdisc_data *qdisc,struct atm_flow_data *flow)
90{ 84{
91 struct atm_flow_data *walk; 85 struct atm_flow_data *walk;
92 86
93 DPRINTK("find_flow(qdisc %p,flow %p)\n",qdisc,flow); 87 DPRINTK("find_flow(qdisc %p,flow %p)\n", qdisc, flow);
94 for (walk = qdisc->flows; walk; walk = walk->next) 88 for (walk = qdisc->flows; walk; walk = walk->next)
95 if (walk == flow) return 1; 89 if (walk == flow)
90 return 1;
96 DPRINTK("find_flow: not found\n"); 91 DPRINTK("find_flow: not found\n");
97 return 0; 92 return 0;
98} 93}
99 94
100 95static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
101static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
102 u32 classid)
103{ 96{
104 struct atm_qdisc_data *p = PRIV(sch); 97 struct atm_qdisc_data *p = PRIV(sch);
105 struct atm_flow_data *flow; 98 struct atm_flow_data *flow;
106 99
107 for (flow = p->flows; flow; flow = flow->next) 100 for (flow = p->flows; flow; flow = flow->next)
108 if (flow->classid == classid) break; 101 if (flow->classid == classid)
102 break;
109 return flow; 103 return flow;
110} 104}
111 105
112 106static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
113static int atm_tc_graft(struct Qdisc *sch,unsigned long arg, 107 struct Qdisc *new, struct Qdisc **old)
114 struct Qdisc *new,struct Qdisc **old)
115{ 108{
116 struct atm_qdisc_data *p = PRIV(sch); 109 struct atm_qdisc_data *p = PRIV(sch);
117 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 110 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
118 111
119 DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",sch, 112 DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
120 p,flow,new,old); 113 sch, p, flow, new, old);
121 if (!find_flow(p,flow)) return -EINVAL; 114 if (!find_flow(p, flow))
122 if (!new) new = &noop_qdisc; 115 return -EINVAL;
123 *old = xchg(&flow->q,new); 116 if (!new)
124 if (*old) qdisc_reset(*old); 117 new = &noop_qdisc;
118 *old = xchg(&flow->q, new);
119 if (*old)
120 qdisc_reset(*old);
125 return 0; 121 return 0;
126} 122}
127 123
128 124static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
129static struct Qdisc *atm_tc_leaf(struct Qdisc *sch,unsigned long cl)
130{ 125{
131 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 126 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
132 127
133 DPRINTK("atm_tc_leaf(sch %p,flow %p)\n",sch,flow); 128 DPRINTK("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
134 return flow ? flow->q : NULL; 129 return flow ? flow->q : NULL;
135} 130}
136 131
137 132static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
138static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid)
139{ 133{
140 struct atm_qdisc_data *p __attribute__((unused)) = PRIV(sch); 134 struct atm_qdisc_data *p __maybe_unused = PRIV(sch);
141 struct atm_flow_data *flow; 135 struct atm_flow_data *flow;
142 136
143 DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid); 137 DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
144 flow = lookup_flow(sch,classid); 138 flow = lookup_flow(sch, classid);
145 if (flow) flow->ref++; 139 if (flow)
146 DPRINTK("atm_tc_get: flow %p\n",flow); 140 flow->ref++;
147 return (unsigned long) flow; 141 DPRINTK("atm_tc_get: flow %p\n", flow);
142 return (unsigned long)flow;
148} 143}
149 144
150
151static unsigned long atm_tc_bind_filter(struct Qdisc *sch, 145static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
152 unsigned long parent, u32 classid) 146 unsigned long parent, u32 classid)
153{ 147{
154 return atm_tc_get(sch,classid); 148 return atm_tc_get(sch, classid);
155} 149}
156 150
157/* 151/*
@@ -159,72 +153,75 @@ static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
159 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop 153 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
160 * anything that still seems to be in use. 154 * anything that still seems to be in use.
161 */ 155 */
162
163static void atm_tc_put(struct Qdisc *sch, unsigned long cl) 156static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
164{ 157{
165 struct atm_qdisc_data *p = PRIV(sch); 158 struct atm_qdisc_data *p = PRIV(sch);
166 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 159 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
167 struct atm_flow_data **prev; 160 struct atm_flow_data **prev;
168 161
169 DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 162 DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
170 if (--flow->ref) return; 163 if (--flow->ref)
164 return;
171 DPRINTK("atm_tc_put: destroying\n"); 165 DPRINTK("atm_tc_put: destroying\n");
172 for (prev = &p->flows; *prev; prev = &(*prev)->next) 166 for (prev = &p->flows; *prev; prev = &(*prev)->next)
173 if (*prev == flow) break; 167 if (*prev == flow)
168 break;
174 if (!*prev) { 169 if (!*prev) {
175 printk(KERN_CRIT "atm_tc_put: class %p not found\n",flow); 170 printk(KERN_CRIT "atm_tc_put: class %p not found\n", flow);
176 return; 171 return;
177 } 172 }
178 *prev = flow->next; 173 *prev = flow->next;
179 DPRINTK("atm_tc_put: qdisc %p\n",flow->q); 174 DPRINTK("atm_tc_put: qdisc %p\n", flow->q);
180 qdisc_destroy(flow->q); 175 qdisc_destroy(flow->q);
181 tcf_destroy_chain(flow->filter_list); 176 tcf_destroy_chain(flow->filter_list);
182 if (flow->sock) { 177 if (flow->sock) {
183 DPRINTK("atm_tc_put: f_count %d\n", 178 DPRINTK("atm_tc_put: f_count %d\n",
184 file_count(flow->sock->file)); 179 file_count(flow->sock->file));
185 flow->vcc->pop = flow->old_pop; 180 flow->vcc->pop = flow->old_pop;
186 sockfd_put(flow->sock); 181 sockfd_put(flow->sock);
187 } 182 }
188 if (flow->excess) atm_tc_put(sch,(unsigned long) flow->excess); 183 if (flow->excess)
189 if (flow != &p->link) kfree(flow); 184 atm_tc_put(sch, (unsigned long)flow->excess);
185 if (flow != &p->link)
186 kfree(flow);
190 /* 187 /*
191 * If flow == &p->link, the qdisc no longer works at this point and 188 * If flow == &p->link, the qdisc no longer works at this point and
192 * needs to be removed. (By the caller of atm_tc_put.) 189 * needs to be removed. (By the caller of atm_tc_put.)
193 */ 190 */
194} 191}
195 192
196 193static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
197static void sch_atm_pop(struct atm_vcc *vcc,struct sk_buff *skb)
198{ 194{
199 struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent; 195 struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
200 196
201 D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n",vcc,skb,p); 197 D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
202 VCC2FLOW(vcc)->old_pop(vcc,skb); 198 VCC2FLOW(vcc)->old_pop(vcc, skb);
203 tasklet_schedule(&p->task); 199 tasklet_schedule(&p->task);
204} 200}
205 201
206static const u8 llc_oui_ip[] = { 202static const u8 llc_oui_ip[] = {
207 0xaa, /* DSAP: non-ISO */ 203 0xaa, /* DSAP: non-ISO */
208 0xaa, /* SSAP: non-ISO */ 204 0xaa, /* SSAP: non-ISO */
209 0x03, /* Ctrl: Unnumbered Information Command PDU */ 205 0x03, /* Ctrl: Unnumbered Information Command PDU */
210 0x00, /* OUI: EtherType */ 206 0x00, /* OUI: EtherType */
211 0x00, 0x00, 207 0x00, 0x00,
212 0x08, 0x00 }; /* Ethertype IP (0800) */ 208 0x08, 0x00
209}; /* Ethertype IP (0800) */
213 210
214static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, 211static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
215 struct rtattr **tca, unsigned long *arg) 212 struct rtattr **tca, unsigned long *arg)
216{ 213{
217 struct atm_qdisc_data *p = PRIV(sch); 214 struct atm_qdisc_data *p = PRIV(sch);
218 struct atm_flow_data *flow = (struct atm_flow_data *) *arg; 215 struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
219 struct atm_flow_data *excess = NULL; 216 struct atm_flow_data *excess = NULL;
220 struct rtattr *opt = tca[TCA_OPTIONS-1]; 217 struct rtattr *opt = tca[TCA_OPTIONS - 1];
221 struct rtattr *tb[TCA_ATM_MAX]; 218 struct rtattr *tb[TCA_ATM_MAX];
222 struct socket *sock; 219 struct socket *sock;
223 int fd,error,hdr_len; 220 int fd, error, hdr_len;
224 void *hdr; 221 void *hdr;
225 222
226 DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x," 223 DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
227 "flow %p,opt %p)\n",sch,p,classid,parent,flow,opt); 224 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
228 /* 225 /*
229 * The concept of parents doesn't apply for this qdisc. 226 * The concept of parents doesn't apply for this qdisc.
230 */ 227 */
@@ -237,33 +234,36 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
237 * class needs to be removed and a new one added. (This may be changed 234 * class needs to be removed and a new one added. (This may be changed
238 * later.) 235 * later.)
239 */ 236 */
240 if (flow) return -EBUSY; 237 if (flow)
238 return -EBUSY;
241 if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt)) 239 if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt))
242 return -EINVAL; 240 return -EINVAL;
243 if (!tb[TCA_ATM_FD-1] || RTA_PAYLOAD(tb[TCA_ATM_FD-1]) < sizeof(fd)) 241 if (!tb[TCA_ATM_FD - 1] || RTA_PAYLOAD(tb[TCA_ATM_FD - 1]) < sizeof(fd))
244 return -EINVAL; 242 return -EINVAL;
245 fd = *(int *) RTA_DATA(tb[TCA_ATM_FD-1]); 243 fd = *(int *)RTA_DATA(tb[TCA_ATM_FD - 1]);
246 DPRINTK("atm_tc_change: fd %d\n",fd); 244 DPRINTK("atm_tc_change: fd %d\n", fd);
247 if (tb[TCA_ATM_HDR-1]) { 245 if (tb[TCA_ATM_HDR - 1]) {
248 hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR-1]); 246 hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR - 1]);
249 hdr = RTA_DATA(tb[TCA_ATM_HDR-1]); 247 hdr = RTA_DATA(tb[TCA_ATM_HDR - 1]);
250 } 248 } else {
251 else {
252 hdr_len = RFC1483LLC_LEN; 249 hdr_len = RFC1483LLC_LEN;
253 hdr = NULL; /* default LLC/SNAP for IP */ 250 hdr = NULL; /* default LLC/SNAP for IP */
254 } 251 }
255 if (!tb[TCA_ATM_EXCESS-1]) excess = NULL; 252 if (!tb[TCA_ATM_EXCESS - 1])
253 excess = NULL;
256 else { 254 else {
257 if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS-1]) != sizeof(u32)) 255 if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS - 1]) != sizeof(u32))
258 return -EINVAL; 256 return -EINVAL;
259 excess = (struct atm_flow_data *) atm_tc_get(sch, 257 excess = (struct atm_flow_data *)
260 *(u32 *) RTA_DATA(tb[TCA_ATM_EXCESS-1])); 258 atm_tc_get(sch, *(u32 *)RTA_DATA(tb[TCA_ATM_EXCESS - 1]));
261 if (!excess) return -ENOENT; 259 if (!excess)
260 return -ENOENT;
262 } 261 }
263 DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n", 262 DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n",
264 opt->rta_type,RTA_PAYLOAD(opt),hdr_len); 263 opt->rta_type, RTA_PAYLOAD(opt), hdr_len);
265 if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */ 264 if (!(sock = sockfd_lookup(fd, &error)))
266 DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file)); 265 return error; /* f_count++ */
266 DPRINTK("atm_tc_change: f_count %d\n", file_count(sock->file));
267 if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { 267 if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
268 error = -EPROTOTYPE; 268 error = -EPROTOTYPE;
269 goto err_out; 269 goto err_out;
@@ -276,37 +276,37 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
276 error = -EINVAL; 276 error = -EINVAL;
277 goto err_out; 277 goto err_out;
278 } 278 }
279 if (find_flow(p,flow)) { 279 if (find_flow(p, flow)) {
280 error = -EEXIST; 280 error = -EEXIST;
281 goto err_out; 281 goto err_out;
282 } 282 }
283 } 283 } else {
284 else {
285 int i; 284 int i;
286 unsigned long cl; 285 unsigned long cl;
287 286
288 for (i = 1; i < 0x8000; i++) { 287 for (i = 1; i < 0x8000; i++) {
289 classid = TC_H_MAKE(sch->handle,0x8000 | i); 288 classid = TC_H_MAKE(sch->handle, 0x8000 | i);
290 if (!(cl = atm_tc_get(sch,classid))) break; 289 if (!(cl = atm_tc_get(sch, classid)))
291 atm_tc_put(sch,cl); 290 break;
291 atm_tc_put(sch, cl);
292 } 292 }
293 } 293 }
294 DPRINTK("atm_tc_change: new id %x\n",classid); 294 DPRINTK("atm_tc_change: new id %x\n", classid);
295 flow = kmalloc(sizeof(struct atm_flow_data)+hdr_len,GFP_KERNEL); 295 flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
296 DPRINTK("atm_tc_change: flow %p\n",flow); 296 DPRINTK("atm_tc_change: flow %p\n", flow);
297 if (!flow) { 297 if (!flow) {
298 error = -ENOBUFS; 298 error = -ENOBUFS;
299 goto err_out; 299 goto err_out;
300 } 300 }
301 memset(flow,0,sizeof(*flow)); 301 memset(flow, 0, sizeof(*flow));
302 flow->filter_list = NULL; 302 flow->filter_list = NULL;
303 if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid))) 303 if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
304 flow->q = &noop_qdisc; 304 flow->q = &noop_qdisc;
305 DPRINTK("atm_tc_change: qdisc %p\n",flow->q); 305 DPRINTK("atm_tc_change: qdisc %p\n", flow->q);
306 flow->sock = sock; 306 flow->sock = sock;
307 flow->vcc = ATM_SD(sock); /* speedup */ 307 flow->vcc = ATM_SD(sock); /* speedup */
308 flow->vcc->user_back = flow; 308 flow->vcc->user_back = flow;
309 DPRINTK("atm_tc_change: vcc %p\n",flow->vcc); 309 DPRINTK("atm_tc_change: vcc %p\n", flow->vcc);
310 flow->old_pop = flow->vcc->pop; 310 flow->old_pop = flow->vcc->pop;
311 flow->parent = p; 311 flow->parent = p;
312 flow->vcc->pop = sch_atm_pop; 312 flow->vcc->pop = sch_atm_pop;
@@ -317,50 +317,53 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
317 p->link.next = flow; 317 p->link.next = flow;
318 flow->hdr_len = hdr_len; 318 flow->hdr_len = hdr_len;
319 if (hdr) 319 if (hdr)
320 memcpy(flow->hdr,hdr,hdr_len); 320 memcpy(flow->hdr, hdr, hdr_len);
321 else 321 else
322 memcpy(flow->hdr,llc_oui_ip,sizeof(llc_oui_ip)); 322 memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
323 *arg = (unsigned long) flow; 323 *arg = (unsigned long)flow;
324 return 0; 324 return 0;
325err_out: 325err_out:
326 if (excess) atm_tc_put(sch,(unsigned long) excess); 326 if (excess)
327 atm_tc_put(sch, (unsigned long)excess);
327 sockfd_put(sock); 328 sockfd_put(sock);
328 return error; 329 return error;
329} 330}
330 331
331 332static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
332static int atm_tc_delete(struct Qdisc *sch,unsigned long arg)
333{ 333{
334 struct atm_qdisc_data *p = PRIV(sch); 334 struct atm_qdisc_data *p = PRIV(sch);
335 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 335 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
336 336
337 DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 337 DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
338 if (!find_flow(PRIV(sch),flow)) return -EINVAL; 338 if (!find_flow(PRIV(sch), flow))
339 if (flow->filter_list || flow == &p->link) return -EBUSY; 339 return -EINVAL;
340 if (flow->filter_list || flow == &p->link)
341 return -EBUSY;
340 /* 342 /*
341 * Reference count must be 2: one for "keepalive" (set at class 343 * Reference count must be 2: one for "keepalive" (set at class
342 * creation), and one for the reference held when calling delete. 344 * creation), and one for the reference held when calling delete.
343 */ 345 */
344 if (flow->ref < 2) { 346 if (flow->ref < 2) {
345 printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n",flow->ref); 347 printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
346 return -EINVAL; 348 return -EINVAL;
347 } 349 }
348 if (flow->ref > 2) return -EBUSY; /* catch references via excess, etc.*/ 350 if (flow->ref > 2)
349 atm_tc_put(sch,arg); 351 return -EBUSY; /* catch references via excess, etc. */
352 atm_tc_put(sch, arg);
350 return 0; 353 return 0;
351} 354}
352 355
353 356static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
354static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
355{ 357{
356 struct atm_qdisc_data *p = PRIV(sch); 358 struct atm_qdisc_data *p = PRIV(sch);
357 struct atm_flow_data *flow; 359 struct atm_flow_data *flow;
358 360
359 DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker); 361 DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
360 if (walker->stop) return; 362 if (walker->stop)
363 return;
361 for (flow = p->flows; flow; flow = flow->next) { 364 for (flow = p->flows; flow; flow = flow->next) {
362 if (walker->count >= walker->skip) 365 if (walker->count >= walker->skip)
363 if (walker->fn(sch,(unsigned long) flow,walker) < 0) { 366 if (walker->fn(sch, (unsigned long)flow, walker) < 0) {
364 walker->stop = 1; 367 walker->stop = 1;
365 break; 368 break;
366 } 369 }
@@ -368,73 +371,71 @@ static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
368 } 371 }
369} 372}
370 373
371 374static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
372static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl)
373{ 375{
374 struct atm_qdisc_data *p = PRIV(sch); 376 struct atm_qdisc_data *p = PRIV(sch);
375 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 377 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
376 378
377 DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); 379 DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
378 return flow ? &flow->filter_list : &p->link.filter_list; 380 return flow ? &flow->filter_list : &p->link.filter_list;
379} 381}
380 382
381
382/* --------------------------- Qdisc operations ---------------------------- */ 383/* --------------------------- Qdisc operations ---------------------------- */
383 384
384 385static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
385static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
386{ 386{
387 struct atm_qdisc_data *p = PRIV(sch); 387 struct atm_qdisc_data *p = PRIV(sch);
388 struct atm_flow_data *flow = NULL ; /* @@@ */ 388 struct atm_flow_data *flow = NULL; /* @@@ */
389 struct tcf_result res; 389 struct tcf_result res;
390 int result; 390 int result;
391 int ret = NET_XMIT_POLICED; 391 int ret = NET_XMIT_POLICED;
392 392
393 D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); 393 D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
394 result = TC_POLICE_OK; /* be nice to gcc */ 394 result = TC_POLICE_OK; /* be nice to gcc */
395 if (TC_H_MAJ(skb->priority) != sch->handle || 395 if (TC_H_MAJ(skb->priority) != sch->handle ||
396 !(flow = (struct atm_flow_data *) atm_tc_get(sch,skb->priority))) 396 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
397 for (flow = p->flows; flow; flow = flow->next) 397 for (flow = p->flows; flow; flow = flow->next)
398 if (flow->filter_list) { 398 if (flow->filter_list) {
399 result = tc_classify(skb,flow->filter_list, 399 result = tc_classify_compat(skb,
400 &res); 400 flow->filter_list,
401 if (result < 0) continue; 401 &res);
402 flow = (struct atm_flow_data *) res.class; 402 if (result < 0)
403 if (!flow) flow = lookup_flow(sch,res.classid); 403 continue;
404 flow = (struct atm_flow_data *)res.class;
405 if (!flow)
406 flow = lookup_flow(sch, res.classid);
404 break; 407 break;
405 } 408 }
406 if (!flow) flow = &p->link; 409 if (!flow)
410 flow = &p->link;
407 else { 411 else {
408 if (flow->vcc) 412 if (flow->vcc)
409 ATM_SKB(skb)->atm_options = flow->vcc->atm_options; 413 ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
410 /*@@@ looks good ... but it's not supposed to work :-)*/ 414 /*@@@ looks good ... but it's not supposed to work :-) */
411#ifdef CONFIG_NET_CLS_POLICE 415#ifdef CONFIG_NET_CLS_ACT
412 switch (result) { 416 switch (result) {
413 case TC_POLICE_SHOT: 417 case TC_ACT_QUEUED:
414 kfree_skb(skb); 418 case TC_ACT_STOLEN:
415 break; 419 kfree_skb(skb);
416 case TC_POLICE_RECLASSIFY: 420 return NET_XMIT_SUCCESS;
417 if (flow->excess) flow = flow->excess; 421 case TC_ACT_SHOT:
418 else { 422 kfree_skb(skb);
419 ATM_SKB(skb)->atm_options |= 423 goto drop;
420 ATM_ATMOPT_CLP; 424 case TC_POLICE_RECLASSIFY:
421 break; 425 if (flow->excess)
422 } 426 flow = flow->excess;
423 /* fall through */ 427 else
424 case TC_POLICE_OK: 428 ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
425 /* fall through */ 429 break;
426 default:
427 break;
428 } 430 }
429#endif 431#endif
430 } 432 }
431 if ( 433
432#ifdef CONFIG_NET_CLS_POLICE 434 if ((ret = flow->q->enqueue(skb, flow->q)) != 0) {
433 result == TC_POLICE_SHOT || 435drop: __maybe_unused
434#endif
435 (ret = flow->q->enqueue(skb,flow->q)) != 0) {
436 sch->qstats.drops++; 436 sch->qstats.drops++;
437 if (flow) flow->qstats.drops++; 437 if (flow)
438 flow->qstats.drops++;
438 return ret; 439 return ret;
439 } 440 }
440 sch->bstats.bytes += skb->len; 441 sch->bstats.bytes += skb->len;
@@ -458,7 +459,6 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
458 return NET_XMIT_BYPASS; 459 return NET_XMIT_BYPASS;
459} 460}
460 461
461
462/* 462/*
463 * Dequeue packets and send them over ATM. Note that we quite deliberately 463 * Dequeue packets and send them over ATM. Note that we quite deliberately
464 * avoid checking net_device's flow control here, simply because sch_atm 464 * avoid checking net_device's flow control here, simply because sch_atm
@@ -466,167 +466,163 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
466 * non-ATM interfaces. 466 * non-ATM interfaces.
467 */ 467 */
468 468
469
470static void sch_atm_dequeue(unsigned long data) 469static void sch_atm_dequeue(unsigned long data)
471{ 470{
472 struct Qdisc *sch = (struct Qdisc *) data; 471 struct Qdisc *sch = (struct Qdisc *)data;
473 struct atm_qdisc_data *p = PRIV(sch); 472 struct atm_qdisc_data *p = PRIV(sch);
474 struct atm_flow_data *flow; 473 struct atm_flow_data *flow;
475 struct sk_buff *skb; 474 struct sk_buff *skb;
476 475
477 D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n",sch,p); 476 D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
478 for (flow = p->link.next; flow; flow = flow->next) 477 for (flow = p->link.next; flow; flow = flow->next)
479 /* 478 /*
480 * If traffic is properly shaped, this won't generate nasty 479 * If traffic is properly shaped, this won't generate nasty
481 * little bursts. Otherwise, it may ... (but that's okay) 480 * little bursts. Otherwise, it may ... (but that's okay)
482 */ 481 */
483 while ((skb = flow->q->dequeue(flow->q))) { 482 while ((skb = flow->q->dequeue(flow->q))) {
484 if (!atm_may_send(flow->vcc,skb->truesize)) { 483 if (!atm_may_send(flow->vcc, skb->truesize)) {
485 (void) flow->q->ops->requeue(skb,flow->q); 484 (void)flow->q->ops->requeue(skb, flow->q);
486 break; 485 break;
487 } 486 }
488 D2PRINTK("atm_tc_dequeue: sending on class %p\n",flow); 487 D2PRINTK("atm_tc_dequeue: sending on class %p\n", flow);
489 /* remove any LL header somebody else has attached */ 488 /* remove any LL header somebody else has attached */
490 skb_pull(skb, skb_network_offset(skb)); 489 skb_pull(skb, skb_network_offset(skb));
491 if (skb_headroom(skb) < flow->hdr_len) { 490 if (skb_headroom(skb) < flow->hdr_len) {
492 struct sk_buff *new; 491 struct sk_buff *new;
493 492
494 new = skb_realloc_headroom(skb,flow->hdr_len); 493 new = skb_realloc_headroom(skb, flow->hdr_len);
495 dev_kfree_skb(skb); 494 dev_kfree_skb(skb);
496 if (!new) continue; 495 if (!new)
496 continue;
497 skb = new; 497 skb = new;
498 } 498 }
499 D2PRINTK("sch_atm_dequeue: ip %p, data %p\n", 499 D2PRINTK("sch_atm_dequeue: ip %p, data %p\n",
500 skb_network_header(skb), skb->data); 500 skb_network_header(skb), skb->data);
501 ATM_SKB(skb)->vcc = flow->vcc; 501 ATM_SKB(skb)->vcc = flow->vcc;
502 memcpy(skb_push(skb,flow->hdr_len),flow->hdr, 502 memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
503 flow->hdr_len); 503 flow->hdr_len);
504 atomic_add(skb->truesize, 504 atomic_add(skb->truesize,
505 &sk_atm(flow->vcc)->sk_wmem_alloc); 505 &sk_atm(flow->vcc)->sk_wmem_alloc);
506 /* atm.atm_options are already set by atm_tc_enqueue */ 506 /* atm.atm_options are already set by atm_tc_enqueue */
507 (void) flow->vcc->send(flow->vcc,skb); 507 flow->vcc->send(flow->vcc, skb);
508 } 508 }
509} 509}
510 510
511
512static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) 511static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
513{ 512{
514 struct atm_qdisc_data *p = PRIV(sch); 513 struct atm_qdisc_data *p = PRIV(sch);
515 struct sk_buff *skb; 514 struct sk_buff *skb;
516 515
517 D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n",sch,p); 516 D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
518 tasklet_schedule(&p->task); 517 tasklet_schedule(&p->task);
519 skb = p->link.q->dequeue(p->link.q); 518 skb = p->link.q->dequeue(p->link.q);
520 if (skb) sch->q.qlen--; 519 if (skb)
520 sch->q.qlen--;
521 return skb; 521 return skb;
522} 522}
523 523
524 524static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
525static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch)
526{ 525{
527 struct atm_qdisc_data *p = PRIV(sch); 526 struct atm_qdisc_data *p = PRIV(sch);
528 int ret; 527 int ret;
529 528
530 D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); 529 D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
531 ret = p->link.q->ops->requeue(skb,p->link.q); 530 ret = p->link.q->ops->requeue(skb, p->link.q);
532 if (!ret) { 531 if (!ret) {
533 sch->q.qlen++; 532 sch->q.qlen++;
534 sch->qstats.requeues++; 533 sch->qstats.requeues++;
535 } else { 534 } else {
536 sch->qstats.drops++; 535 sch->qstats.drops++;
537 p->link.qstats.drops++; 536 p->link.qstats.drops++;
538 } 537 }
539 return ret; 538 return ret;
540} 539}
541 540
542
543static unsigned int atm_tc_drop(struct Qdisc *sch) 541static unsigned int atm_tc_drop(struct Qdisc *sch)
544{ 542{
545 struct atm_qdisc_data *p = PRIV(sch); 543 struct atm_qdisc_data *p = PRIV(sch);
546 struct atm_flow_data *flow; 544 struct atm_flow_data *flow;
547 unsigned int len; 545 unsigned int len;
548 546
549 DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n",sch,p); 547 DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
550 for (flow = p->flows; flow; flow = flow->next) 548 for (flow = p->flows; flow; flow = flow->next)
551 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q))) 549 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
552 return len; 550 return len;
553 return 0; 551 return 0;
554} 552}
555 553
556 554static int atm_tc_init(struct Qdisc *sch, struct rtattr *opt)
557static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt)
558{ 555{
559 struct atm_qdisc_data *p = PRIV(sch); 556 struct atm_qdisc_data *p = PRIV(sch);
560 557
561 DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); 558 DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
562 p->flows = &p->link; 559 p->flows = &p->link;
563 if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops, 560 if (!(p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
564 sch->handle))) 561 sch->handle)))
565 p->link.q = &noop_qdisc; 562 p->link.q = &noop_qdisc;
566 DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q); 563 DPRINTK("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
567 p->link.filter_list = NULL; 564 p->link.filter_list = NULL;
568 p->link.vcc = NULL; 565 p->link.vcc = NULL;
569 p->link.sock = NULL; 566 p->link.sock = NULL;
570 p->link.classid = sch->handle; 567 p->link.classid = sch->handle;
571 p->link.ref = 1; 568 p->link.ref = 1;
572 p->link.next = NULL; 569 p->link.next = NULL;
573 tasklet_init(&p->task,sch_atm_dequeue,(unsigned long) sch); 570 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
574 return 0; 571 return 0;
575} 572}
576 573
577
578static void atm_tc_reset(struct Qdisc *sch) 574static void atm_tc_reset(struct Qdisc *sch)
579{ 575{
580 struct atm_qdisc_data *p = PRIV(sch); 576 struct atm_qdisc_data *p = PRIV(sch);
581 struct atm_flow_data *flow; 577 struct atm_flow_data *flow;
582 578
583 DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n",sch,p); 579 DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
584 for (flow = p->flows; flow; flow = flow->next) qdisc_reset(flow->q); 580 for (flow = p->flows; flow; flow = flow->next)
581 qdisc_reset(flow->q);
585 sch->q.qlen = 0; 582 sch->q.qlen = 0;
586} 583}
587 584
588
589static void atm_tc_destroy(struct Qdisc *sch) 585static void atm_tc_destroy(struct Qdisc *sch)
590{ 586{
591 struct atm_qdisc_data *p = PRIV(sch); 587 struct atm_qdisc_data *p = PRIV(sch);
592 struct atm_flow_data *flow; 588 struct atm_flow_data *flow;
593 589
594 DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p); 590 DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
595 /* races ? */ 591 /* races ? */
596 while ((flow = p->flows)) { 592 while ((flow = p->flows)) {
597 tcf_destroy_chain(flow->filter_list); 593 tcf_destroy_chain(flow->filter_list);
598 flow->filter_list = NULL; 594 flow->filter_list = NULL;
599 if (flow->ref > 1) 595 if (flow->ref > 1)
600 printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow, 596 printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
601 flow->ref); 597 flow->ref);
602 atm_tc_put(sch,(unsigned long) flow); 598 atm_tc_put(sch, (unsigned long)flow);
603 if (p->flows == flow) { 599 if (p->flows == flow) {
604 printk(KERN_ERR "atm_destroy: putting flow %p didn't " 600 printk(KERN_ERR "atm_destroy: putting flow %p didn't "
605 "kill it\n",flow); 601 "kill it\n", flow);
606 p->flows = flow->next; /* brute force */ 602 p->flows = flow->next; /* brute force */
607 break; 603 break;
608 } 604 }
609 } 605 }
610 tasklet_kill(&p->task); 606 tasklet_kill(&p->task);
611} 607}
612 608
613
614static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, 609static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
615 struct sk_buff *skb, struct tcmsg *tcm) 610 struct sk_buff *skb, struct tcmsg *tcm)
616{ 611{
617 struct atm_qdisc_data *p = PRIV(sch); 612 struct atm_qdisc_data *p = PRIV(sch);
618 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 613 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
619 unsigned char *b = skb_tail_pointer(skb); 614 unsigned char *b = skb_tail_pointer(skb);
620 struct rtattr *rta; 615 struct rtattr *rta;
621 616
622 DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", 617 DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
623 sch,p,flow,skb,tcm); 618 sch, p, flow, skb, tcm);
624 if (!find_flow(p,flow)) return -EINVAL; 619 if (!find_flow(p, flow))
620 return -EINVAL;
625 tcm->tcm_handle = flow->classid; 621 tcm->tcm_handle = flow->classid;
626 tcm->tcm_info = flow->q->handle; 622 tcm->tcm_info = flow->q->handle;
627 rta = (struct rtattr *) b; 623 rta = (struct rtattr *)b;
628 RTA_PUT(skb,TCA_OPTIONS,0,NULL); 624 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
629 RTA_PUT(skb,TCA_ATM_HDR,flow->hdr_len,flow->hdr); 625 RTA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
630 if (flow->vcc) { 626 if (flow->vcc) {
631 struct sockaddr_atmpvc pvc; 627 struct sockaddr_atmpvc pvc;
632 int state; 628 int state;
@@ -635,16 +631,16 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
635 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; 631 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
636 pvc.sap_addr.vpi = flow->vcc->vpi; 632 pvc.sap_addr.vpi = flow->vcc->vpi;
637 pvc.sap_addr.vci = flow->vcc->vci; 633 pvc.sap_addr.vci = flow->vcc->vci;
638 RTA_PUT(skb,TCA_ATM_ADDR,sizeof(pvc),&pvc); 634 RTA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
639 state = ATM_VF2VS(flow->vcc->flags); 635 state = ATM_VF2VS(flow->vcc->flags);
640 RTA_PUT(skb,TCA_ATM_STATE,sizeof(state),&state); 636 RTA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state);
641 } 637 }
642 if (flow->excess) 638 if (flow->excess)
643 RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(u32),&flow->classid); 639 RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid);
644 else { 640 else {
645 static u32 zero; 641 static u32 zero;
646 642
647 RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero); 643 RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero);
648 } 644 }
649 rta->rta_len = skb_tail_pointer(skb) - b; 645 rta->rta_len = skb_tail_pointer(skb) - b;
650 return skb->len; 646 return skb->len;
@@ -655,9 +651,9 @@ rtattr_failure:
655} 651}
656static int 652static int
657atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, 653atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
658 struct gnet_dump *d) 654 struct gnet_dump *d)
659{ 655{
660 struct atm_flow_data *flow = (struct atm_flow_data *) arg; 656 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
661 657
662 flow->qstats.qlen = flow->q->q.qlen; 658 flow->qstats.qlen = flow->q->q.qlen;
663 659
@@ -674,38 +670,35 @@ static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
674} 670}
675 671
676static struct Qdisc_class_ops atm_class_ops = { 672static struct Qdisc_class_ops atm_class_ops = {
677 .graft = atm_tc_graft, 673 .graft = atm_tc_graft,
678 .leaf = atm_tc_leaf, 674 .leaf = atm_tc_leaf,
679 .get = atm_tc_get, 675 .get = atm_tc_get,
680 .put = atm_tc_put, 676 .put = atm_tc_put,
681 .change = atm_tc_change, 677 .change = atm_tc_change,
682 .delete = atm_tc_delete, 678 .delete = atm_tc_delete,
683 .walk = atm_tc_walk, 679 .walk = atm_tc_walk,
684 .tcf_chain = atm_tc_find_tcf, 680 .tcf_chain = atm_tc_find_tcf,
685 .bind_tcf = atm_tc_bind_filter, 681 .bind_tcf = atm_tc_bind_filter,
686 .unbind_tcf = atm_tc_put, 682 .unbind_tcf = atm_tc_put,
687 .dump = atm_tc_dump_class, 683 .dump = atm_tc_dump_class,
688 .dump_stats = atm_tc_dump_class_stats, 684 .dump_stats = atm_tc_dump_class_stats,
689}; 685};
690 686
691static struct Qdisc_ops atm_qdisc_ops = { 687static struct Qdisc_ops atm_qdisc_ops = {
692 .next = NULL, 688 .cl_ops = &atm_class_ops,
693 .cl_ops = &atm_class_ops, 689 .id = "atm",
694 .id = "atm", 690 .priv_size = sizeof(struct atm_qdisc_data),
695 .priv_size = sizeof(struct atm_qdisc_data), 691 .enqueue = atm_tc_enqueue,
696 .enqueue = atm_tc_enqueue, 692 .dequeue = atm_tc_dequeue,
697 .dequeue = atm_tc_dequeue, 693 .requeue = atm_tc_requeue,
698 .requeue = atm_tc_requeue, 694 .drop = atm_tc_drop,
699 .drop = atm_tc_drop, 695 .init = atm_tc_init,
700 .init = atm_tc_init, 696 .reset = atm_tc_reset,
701 .reset = atm_tc_reset, 697 .destroy = atm_tc_destroy,
702 .destroy = atm_tc_destroy, 698 .dump = atm_tc_dump,
703 .change = NULL, 699 .owner = THIS_MODULE,
704 .dump = atm_tc_dump,
705 .owner = THIS_MODULE,
706}; 700};
707 701
708
709static int __init atm_init(void) 702static int __init atm_init(void)
710{ 703{
711 return register_qdisc(&atm_qdisc_ops); 704 return register_qdisc(&atm_qdisc_ops);
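
In the sch_atm enqueue path above, the old TC_POLICE_* handling is replaced by the generic TC_ACT_* verdicts: queued or stolen packets are treated as consumed, TC_ACT_SHOT becomes a drop, and TC_ACT_RECLASSIFY falls back to the excess class or marks the cell CLP-eligible. A simplified sketch of that dispatch, with illustrative constants rather than the kernel's types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's TC_ACT_* verdicts. */
enum verdict { ACT_OK, ACT_RECLASSIFY, ACT_SHOT, ACT_QUEUED, ACT_STOLEN };

/* What the qdisc should do with the packet after classification. */
enum disposition { ENQUEUE, ENQUEUE_ON_EXCESS, MARK_CLP_AND_ENQUEUE, DROP, CONSUMED };

/* Simplified shape of the verdict handling in the reworked atm_tc_enqueue(). */
static enum disposition handle_verdict(enum verdict result, bool have_excess)
{
	switch (result) {
	case ACT_QUEUED:
	case ACT_STOLEN:
		return CONSUMED;		/* someone else owns the skb now */
	case ACT_SHOT:
		return DROP;			/* free the skb, count a drop */
	case ACT_RECLASSIFY:			/* old TC_POLICE_RECLASSIFY */
		return have_excess ? ENQUEUE_ON_EXCESS : MARK_CLP_AND_ENQUEUE;
	default:
		return ENQUEUE;			/* conforming traffic */
	}
}

int main(void)
{
	printf("shot -> %d, reclassify -> %d\n",
	       handle_verdict(ACT_SHOT, false),
	       handle_verdict(ACT_RECLASSIFY, true));
	return 0;
}
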
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index b184c3545145..e38c2839b25c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -82,7 +82,7 @@ struct cbq_class
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
 	unsigned char		ovl_strategy;
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
 	unsigned char		police;
 #endif
 
@@ -154,7 +154,7 @@ struct cbq_sched_data
 	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
 								   with backlog */
 
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
 	struct cbq_class	*rx_class;
 #endif
 	struct cbq_class	*tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
196 return NULL; 196 return NULL;
197} 197}
198 198
199#ifdef CONFIG_NET_CLS_POLICE 199#ifdef CONFIG_NET_CLS_ACT
200 200
201static struct cbq_class * 201static struct cbq_class *
202cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) 202cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
247 /* 247 /*
248 * Step 2+n. Apply classifier. 248 * Step 2+n. Apply classifier.
249 */ 249 */
250 if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0) 250 if (!head->filter_list ||
251 (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
251 goto fallback; 252 goto fallback;
252 253
253 if ((cl = (void*)res.class) == NULL) { 254 if ((cl = (void*)res.class) == NULL) {
@@ -267,15 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
267 *qerr = NET_XMIT_SUCCESS; 268 *qerr = NET_XMIT_SUCCESS;
268 case TC_ACT_SHOT: 269 case TC_ACT_SHOT:
269 return NULL; 270 return NULL;
270 } 271 case TC_ACT_RECLASSIFY:
271#elif defined(CONFIG_NET_CLS_POLICE)
272 switch (result) {
273 case TC_POLICE_RECLASSIFY:
274 return cbq_reclassify(skb, cl); 272 return cbq_reclassify(skb, cl);
275 case TC_POLICE_SHOT:
276 return NULL;
277 default:
278 break;
279 } 273 }
280#endif 274#endif
281 if (cl->level == 0) 275 if (cl->level == 0)
@@ -389,7 +383,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
389 int ret; 383 int ret;
390 struct cbq_class *cl = cbq_classify(skb, sch, &ret); 384 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
391 385
392#ifdef CONFIG_NET_CLS_POLICE 386#ifdef CONFIG_NET_CLS_ACT
393 q->rx_class = cl; 387 q->rx_class = cl;
394#endif 388#endif
395 if (cl == NULL) { 389 if (cl == NULL) {
@@ -399,7 +393,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
399 return ret; 393 return ret;
400 } 394 }
401 395
402#ifdef CONFIG_NET_CLS_POLICE 396#ifdef CONFIG_NET_CLS_ACT
403 cl->q->__parent = sch; 397 cl->q->__parent = sch;
404#endif 398#endif
405 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) { 399 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +428,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
434 428
435 cbq_mark_toplevel(q, cl); 429 cbq_mark_toplevel(q, cl);
436 430
437#ifdef CONFIG_NET_CLS_POLICE 431#ifdef CONFIG_NET_CLS_ACT
438 q->rx_class = cl; 432 q->rx_class = cl;
439 cl->q->__parent = sch; 433 cl->q->__parent = sch;
440#endif 434#endif
@@ -669,9 +663,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
669 return HRTIMER_NORESTART; 663 return HRTIMER_NORESTART;
670} 664}
671 665
672 666#ifdef CONFIG_NET_CLS_ACT
673#ifdef CONFIG_NET_CLS_POLICE
674
675static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) 667static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
676{ 668{
677 int len = skb->len; 669 int len = skb->len;
@@ -1364,7 +1356,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
1364 return 0; 1356 return 0;
1365} 1357}
1366 1358
1367#ifdef CONFIG_NET_CLS_POLICE 1359#ifdef CONFIG_NET_CLS_ACT
1368static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p) 1360static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
1369{ 1361{
1370 cl->police = p->police; 1362 cl->police = p->police;
@@ -1532,7 +1524,7 @@ rtattr_failure:
1532 return -1; 1524 return -1;
1533} 1525}
1534 1526
1535#ifdef CONFIG_NET_CLS_POLICE 1527#ifdef CONFIG_NET_CLS_ACT
1536static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) 1528static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1537{ 1529{
1538 unsigned char *b = skb_tail_pointer(skb); 1530 unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1550,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1558 cbq_dump_rate(skb, cl) < 0 || 1550 cbq_dump_rate(skb, cl) < 0 ||
1559 cbq_dump_wrr(skb, cl) < 0 || 1551 cbq_dump_wrr(skb, cl) < 0 ||
1560 cbq_dump_ovl(skb, cl) < 0 || 1552 cbq_dump_ovl(skb, cl) < 0 ||
1561#ifdef CONFIG_NET_CLS_POLICE 1553#ifdef CONFIG_NET_CLS_ACT
1562 cbq_dump_police(skb, cl) < 0 || 1554 cbq_dump_police(skb, cl) < 0 ||
1563#endif 1555#endif
1564 cbq_dump_fopt(skb, cl) < 0) 1556 cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1645,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1653 cl->classid)) == NULL) 1645 cl->classid)) == NULL)
1654 return -ENOBUFS; 1646 return -ENOBUFS;
1655 } else { 1647 } else {
1656#ifdef CONFIG_NET_CLS_POLICE 1648#ifdef CONFIG_NET_CLS_ACT
1657 if (cl->police == TC_POLICE_RECLASSIFY) 1649 if (cl->police == TC_POLICE_RECLASSIFY)
1658 new->reshape_fail = cbq_reshape_fail; 1650 new->reshape_fail = cbq_reshape_fail;
1659#endif 1651#endif
@@ -1718,7 +1710,7 @@ cbq_destroy(struct Qdisc* sch)
1718 struct cbq_class *cl; 1710 struct cbq_class *cl;
1719 unsigned h; 1711 unsigned h;
1720 1712
1721#ifdef CONFIG_NET_CLS_POLICE 1713#ifdef CONFIG_NET_CLS_ACT
1722 q->rx_class = NULL; 1714 q->rx_class = NULL;
1723#endif 1715#endif
1724 /* 1716 /*
@@ -1747,7 +1739,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
1747 struct cbq_class *cl = (struct cbq_class*)arg; 1739 struct cbq_class *cl = (struct cbq_class*)arg;
1748 1740
1749 if (--cl->refcnt == 0) { 1741 if (--cl->refcnt == 0) {
1750#ifdef CONFIG_NET_CLS_POLICE 1742#ifdef CONFIG_NET_CLS_ACT
1751 struct cbq_sched_data *q = qdisc_priv(sch); 1743 struct cbq_sched_data *q = qdisc_priv(sch);
1752 1744
1753 spin_lock_bh(&sch->dev->queue_lock); 1745 spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1787,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1795 RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt)) 1787 RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
1796 return -EINVAL; 1788 return -EINVAL;
1797 1789
1798#ifdef CONFIG_NET_CLS_POLICE 1790#ifdef CONFIG_NET_CLS_ACT
1799 if (tb[TCA_CBQ_POLICE-1] && 1791 if (tb[TCA_CBQ_POLICE-1] &&
1800 RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police)) 1792 RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
1801 return -EINVAL; 1793 return -EINVAL;
@@ -1838,7 +1830,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1838 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1830 if (tb[TCA_CBQ_OVL_STRATEGY-1])
1839 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1831 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
1840 1832
1841#ifdef CONFIG_NET_CLS_POLICE 1833#ifdef CONFIG_NET_CLS_ACT
1842 if (tb[TCA_CBQ_POLICE-1]) 1834 if (tb[TCA_CBQ_POLICE-1])
1843 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1835 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
1844#endif 1836#endif
@@ -1931,7 +1923,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
1931 cl->overlimit = cbq_ovl_classic; 1923 cl->overlimit = cbq_ovl_classic;
1932 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1924 if (tb[TCA_CBQ_OVL_STRATEGY-1])
1933 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1925 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
1934#ifdef CONFIG_NET_CLS_POLICE 1926#ifdef CONFIG_NET_CLS_ACT
1935 if (tb[TCA_CBQ_POLICE-1]) 1927 if (tb[TCA_CBQ_POLICE-1])
1936 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1928 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
1937#endif 1929#endif
@@ -1975,7 +1967,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1975 q->tx_class = NULL; 1967 q->tx_class = NULL;
1976 q->tx_borrowed = NULL; 1968 q->tx_borrowed = NULL;
1977 } 1969 }
1978#ifdef CONFIG_NET_CLS_POLICE 1970#ifdef CONFIG_NET_CLS_ACT
1979 if (q->rx_class == cl) 1971 if (q->rx_class == cl)
1980 q->rx_class = NULL; 1972 q->rx_class = NULL;
1981#endif 1973#endif
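The cbq_classify() change above switches the qdisc to tc_classify_compat() and folds the old TC_POLICE_RECLASSIFY / TC_POLICE_SHOT branch into the CONFIG_NET_CLS_ACT switch, with TC_ACT_RECLASSIFY handled locally by cbq_reclassify(). A minimal sketch of that control flow, using stand-in names rather than the real CBQ types:

/* Sketch only: enum values and helpers are stand-ins for the kernel's
 * TC_ACT_* codes and cbq_reclassify(), not the real definitions. */
#include <stdio.h>

enum verdict { ACT_OK, ACT_RECLASSIFY, ACT_SHOT, ACT_QUEUED, ACT_STOLEN };

struct cls { const char *name; };

/* Stand-in for cbq_reclassify(): fall back to a parent/default class. */
static struct cls *reclassify(struct cls *cl, struct cls *fallback)
{
	(void)cl;
	return fallback;
}

static struct cls *classify(enum verdict v, struct cls *cl, struct cls *fallback)
{
	switch (v) {
	case ACT_QUEUED:
	case ACT_STOLEN:
	case ACT_SHOT:
		return NULL;			/* caller drops or stops the enqueue */
	case ACT_RECLASSIFY:
		return reclassify(cl, fallback);/* qdisc-local reclassification */
	default:
		return cl;			/* ACT_OK: keep the class the filter chose */
	}
}

int main(void)
{
	struct cls leaf = { "leaf" }, root = { "root" };

	printf("%s\n", classify(ACT_RECLASSIFY, &leaf, &root)->name);	/* prints "root" */
	return 0;
}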
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 4d2c233a8611..60f89199e3da 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -237,25 +237,23 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
237 D2PRINTK("result %d class 0x%04x\n", result, res.classid); 237 D2PRINTK("result %d class 0x%04x\n", result, res.classid);
238 238
239 switch (result) { 239 switch (result) {
240#ifdef CONFIG_NET_CLS_POLICE 240#ifdef CONFIG_NET_CLS_ACT
241 case TC_POLICE_SHOT: 241 case TC_ACT_QUEUED:
242 kfree_skb(skb); 242 case TC_ACT_STOLEN:
243 sch->qstats.drops++; 243 kfree_skb(skb);
244 return NET_XMIT_POLICED; 244 return NET_XMIT_SUCCESS;
245#if 0 245 case TC_ACT_SHOT:
246 case TC_POLICE_RECLASSIFY: 246 kfree_skb(skb);
247 /* FIXME: what to do here ??? */ 247 sch->qstats.drops++;
248 return NET_XMIT_BYPASS;
248#endif 249#endif
249#endif 250 case TC_ACT_OK:
250 case TC_POLICE_OK: 251 skb->tc_index = TC_H_MIN(res.classid);
251 skb->tc_index = TC_H_MIN(res.classid); 252 break;
252 break; 253 default:
253 case TC_POLICE_UNSPEC: 254 if (p->default_index != NO_DEFAULT_INDEX)
254 /* fall through */ 255 skb->tc_index = p->default_index;
255 default: 256 break;
256 if (p->default_index != NO_DEFAULT_INDEX)
257 skb->tc_index = p->default_index;
258 break;
259 } 257 }
260 } 258 }
261 259
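The new dsmark_enqueue() switch maps action verdicts onto enqueue results: TC_ACT_QUEUED and TC_ACT_STOLEN free the skb but report success (an action took ownership of the packet), TC_ACT_SHOT frees it, counts a drop and returns NET_XMIT_BYPASS, while TC_ACT_OK and the default case only set skb->tc_index and continue on to the child qdisc. A self-contained sketch of that mapping, with stand-in constants rather than the kernel's:

/* Sketch of the verdict handling shown above; values are illustrative
 * stand-ins for TC_ACT_* and NET_XMIT_*. The real code also frees the
 * skb in the QUEUED/STOLEN/SHOT cases. */
#include <stdio.h>

enum act_verdict { ACT_OK, ACT_QUEUED, ACT_STOLEN, ACT_SHOT, ACT_UNSPEC };

#define XMIT_SUCCESS	0
#define XMIT_BYPASS	1
#define CONTINUE	(-1)		/* keep going: enqueue to the child qdisc */
#define NO_DEFAULT	0xffffu		/* stand-in for NO_DEFAULT_INDEX */

static int handle_verdict(enum act_verdict v, unsigned int *drops,
			  unsigned int *tc_index, unsigned int classid,
			  unsigned int default_index)
{
	switch (v) {
	case ACT_QUEUED:
	case ACT_STOLEN:
		return XMIT_SUCCESS;	/* an action consumed the packet */
	case ACT_SHOT:
		(*drops)++;		/* policed away: count the drop */
		return XMIT_BYPASS;
	case ACT_OK:
		*tc_index = classid;	/* remember the class for later remarking */
		return CONTINUE;
	default:
		if (default_index != NO_DEFAULT)
			*tc_index = default_index;
		return CONTINUE;
	}
}

int main(void)
{
	unsigned int drops = 0, tc_index = 0;

	handle_verdict(ACT_SHOT, &drops, &tc_index, 1, NO_DEFAULT);
	printf("drops=%u\n", drops);	/* prints drops=1 */
	return 0;
}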
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 874452c41a01..55e7e4530f43 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1174,9 +1174,6 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
1174 case TC_ACT_SHOT: 1174 case TC_ACT_SHOT:
1175 return NULL; 1175 return NULL;
1176 } 1176 }
1177#elif defined(CONFIG_NET_CLS_POLICE)
1178 if (result == TC_POLICE_SHOT)
1179 return NULL;
1180#endif 1177#endif
1181 if ((cl = (struct hfsc_class *)res.class) == NULL) { 1178 if ((cl = (struct hfsc_class *)res.class) == NULL) {
1182 if ((cl = hfsc_find_class(res.classid, sch)) == NULL) 1179 if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index b417a95df322..246a2f9765f1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -249,9 +249,6 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
249 case TC_ACT_SHOT: 249 case TC_ACT_SHOT:
250 return NULL; 250 return NULL;
251 } 251 }
252#elif defined(CONFIG_NET_CLS_POLICE)
253 if (result == TC_POLICE_SHOT)
254 return HTB_DIRECT;
255#endif 252#endif
256 if ((cl = (void *)res.class) == NULL) { 253 if ((cl = (void *)res.class) == NULL) {
257 if (res.classid == sch->handle) 254 if (res.classid == sch->handle)
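The sch_hfsc.c and sch_htb.c hunks drop the same CONFIG_NET_CLS_POLICE fallback from their classify helpers. One visible behavioural detail: the old HTB fallback turned TC_POLICE_SHOT into HTB_DIRECT (the packet went to the direct queue), while the remaining CONFIG_NET_CLS_ACT path returns NULL for TC_ACT_SHOT, i.e. the packet is dropped, as HFSC already did in both builds. Reduced to its essence (illustrative names only, not kernel code):

/* "direct" stands in for HTB's direct queue; NULL means "caller drops". */
#include <stdio.h>

struct cls { int dummy; };

static struct cls *old_police_fallback_htb(int shot, struct cls *cl,
					   struct cls *direct)
{
	return shot ? direct : cl;	/* shot packets went to the direct queue */
}

static struct cls *act_path(int shot, struct cls *cl)
{
	return shot ? NULL : cl;	/* shot packets are dropped */
}

int main(void)
{
	struct cls leaf, direct;

	printf("%d %d\n",
	       old_police_fallback_htb(1, &leaf, &direct) == &direct,	/* 1 */
	       act_path(1, &leaf) == NULL);				/* 1 */
	return 0;
}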
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index cd0aab6a2a7c..51f16b0af198 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -164,31 +164,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
164 result = TC_ACT_OK; 164 result = TC_ACT_OK;
165 break; 165 break;
166 } 166 }
167/* backward compat */
168#else
169#ifdef CONFIG_NET_CLS_POLICE
170 switch (result) {
171 case TC_POLICE_SHOT:
172 result = NF_DROP;
173 sch->qstats.drops++;
174 break;
175 case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
176 case TC_POLICE_OK:
177 case TC_POLICE_UNSPEC:
178 default:
179 sch->bstats.packets++;
180 sch->bstats.bytes += skb->len;
181 result = NF_ACCEPT;
182 break;
183 }
184
185#else 167#else
186 D2PRINTK("Overriding result to ACCEPT\n"); 168 D2PRINTK("Overriding result to ACCEPT\n");
187 result = NF_ACCEPT; 169 result = NF_ACCEPT;
188 sch->bstats.packets++; 170 sch->bstats.packets++;
189 sch->bstats.bytes += skb->len; 171 sch->bstats.bytes += skb->len;
190#endif 172#endif
191#endif
192 173
193 return result; 174 return result;
194} 175}
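The block removed from ingress_enqueue() was the backward-compat mapping from TC_POLICE_* results to netfilter verdicts; under CONFIG_NET_CLS_ACT the switch just above the hunk does the equivalent job with TC_ACT_* codes. The removed logic amounted to the following (self-contained sketch, names and constants are not the kernel's):

/* Sketch of the removed policer-to-netfilter mapping: drop and count
 * "shot" packets, account and accept everything else. */
#include <stdio.h>

enum { VERDICT_ACCEPT, VERDICT_DROP };		/* stand-ins for NF_ACCEPT/NF_DROP */

struct stats { unsigned int drops, packets, bytes; };

static int police_to_nf(int shot, unsigned int len, struct stats *st)
{
	if (shot) {
		st->drops++;			/* TC_POLICE_SHOT: drop and count */
		return VERDICT_DROP;
	}
	st->packets++;				/* everything else: accept and account */
	st->bytes += len;
	return VERDICT_ACCEPT;
}

int main(void)
{
	struct stats st = { 0, 0, 0 };

	police_to_nf(0, 100, &st);
	police_to_nf(1, 40, &st);
	printf("packets=%u bytes=%u drops=%u\n", st.packets, st.bytes, st.drops);
	return 0;
}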
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 22e431dace54..8c2639af4c6a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
125 125
126 if (skb->len > q->max_size) { 126 if (skb->len > q->max_size) {
127 sch->qstats.drops++; 127 sch->qstats.drops++;
128#ifdef CONFIG_NET_CLS_POLICE 128#ifdef CONFIG_NET_CLS_ACT
129 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 129 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
130#endif 130#endif
131 kfree_skb(skb); 131 kfree_skb(skb);