author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/sched/sch_htb.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c | 147
1 file changed, 78 insertions(+), 69 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4be8d04b262d..29b942ce9e82 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@ struct htb_class {
 			struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
 			struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
 			/* When class changes from state 1->2 and disconnects from
-			   parent's feed then we lost ptr value and start from the
-			   first child again. Here we store classid of the
-			   last valid ptr (used when ptr is NULL). */
+			 * parent's feed then we lost ptr value and start from the
+			 * first child again. Here we store classid of the
+			 * last valid ptr (used when ptr is NULL).
+			 */
 			u32 last_ptr_id[TC_HTB_NUMPRIO];
 		} inner;
 	} un;
@@ -182,10 +183,10 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * filters in qdisc and in inner nodes (if higher filter points to the inner
  * node). If we end up with classid MAJOR:0 we enqueue the skb into special
  * internal fifo (direct). These packets then go directly thru. If we still
- * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
+ * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful
  * then finish and return direct queue.
  */
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
 
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 				      int *qerr)
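An aside on the HTB_DIRECT change: the old body was an unparenthesized cast, and postfix operators bind tighter than casts in C. A hedged, hypothetical example of the failure mode (this access does not occur in the file itself):

	cl = HTB_DIRECT->parent;	/* old macro: parses as
					 * (struct htb_class *)(-(1->parent))
					 * and does not compile */

Wrapping the whole value as ((struct htb_class *)-1L) keeps the macro atomic in any expression, and -1L also matches pointer width on 64-bit builds.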
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	int result;
 
 	/* allow to select class by setting skb->priority to valid classid;
-	   note that nfmark can be used too by attaching filter fw with no
-	   rules in it */
+	 * note that nfmark can be used too by attaching filter fw with no
+	 * rules in it
+	 */
 	if (skb->priority == sch->handle)
 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
-	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+	cl = htb_find(skb->priority, sch);
+	if (cl && cl->level == 0)
 		return cl;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 			return NULL;
 		}
 #endif
-		if ((cl = (void *)res.class) == NULL) {
+		cl = (void *)res.class;
+		if (!cl) {
 			if (res.classid == sch->handle)
 				return HTB_DIRECT;	/* X:0 (direct flow) */
-			if ((cl = htb_find(res.classid, sch)) == NULL)
+			cl = htb_find(res.classid, sch);
+			if (!cl)
 				break;	/* filter selected invalid classid */
 		}
 		if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 
 		if (p->un.inner.feed[prio].rb_node)
 			/* parent already has its feed in use so that
-			   reset bit in mask as parent is already ok */
+			 * reset bit in mask as parent is already ok
+			 */
 			mask &= ~(1 << prio);
 
 		htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 
 			if (p->un.inner.ptr[prio] == cl->node + prio) {
 				/* we are removing child which is pointed to from
-				   parent feed - forget the pointer but remember
-				   classid */
+				 * parent feed - forget the pointer but remember
+				 * classid
+				 */
 				p->un.inner.last_ptr_id[prio] = cl->common.classid;
 				p->un.inner.ptr[prio] = NULL;
 			}
@@ -569,15 +576,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
 	return NET_XMIT_SUCCESS;
 }
 
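The open-coded byte/packet accounting collapses into bstats_update(), a helper this merge picks up from include/net/sch_generic.h. Approximately, as defined in that era:

	static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
					 struct sk_buff *skb)
	{
		bstats->bytes += qdisc_pkt_len(skb);
		bstats->packets += skb_is_gso(skb) ?
				   skb_shinfo(skb)->gso_segs : 1;
	}

Counting gso_segs rather than 1 keeps packet counts meaningful for GSO super-packets.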
@@ -648,12 +651,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
@@ -669,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 			     unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
-	   1 to simplify things when jiffy is going to be incremented
-	   too soon */
+	 * 1 to simplify things when jiffy is going to be incremented
+	 * too soon
+	 */
 	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
@@ -693,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 
 	/* too much load - let's continue after a break for scheduling */
 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-		printk(KERN_WARNING "htb: too many events!\n");
+		pr_warning("htb: too many events!\n");
 		q->warned |= HTB_WARN_TOOMANYEVENTS;
 	}
 
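This and the later printk conversions use the pr_*() wrappers from include/linux/kernel.h, which splice in the log level and any pr_fmt() prefix automatically. Roughly:

	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_warning(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

Behavior should be unchanged here, since sch_htb.c appears to define no pr_fmt of its own and the default pr_fmt(fmt) expands to fmt.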
@@ -701,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
-   is no such one exists. */
+ * is no such one exists.
+ */
 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 					      u32 id)
 {
@@ -745,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 	for (i = 0; i < 65535; i++) {
 		if (!*sp->pptr && *sp->pid) {
 			/* ptr was invalidated but id is valid - try to recover
-			   the original or next ptr */
+			 * the original or next ptr
+			 */
 			*sp->pptr =
 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 		}
 		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
-				   can become out of date quickly */
+				 * can become out of date quickly
+				 */
 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 			*sp->pptr = sp->root;
 			while ((*sp->pptr)->rb_left)
@@ -778,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 }
 
 /* dequeues packet at given priority and level; call only if
-   you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 					int level)
 {
@@ -795,9 +801,10 @@ next:
 		return NULL;
 
 	/* class can be empty - it is unlikely but can be true if leaf
-	   qdisc drops packets in enqueue routine or if someone used
-	   graft operation on the leaf since last dequeue;
-	   simply deactivate and skip such class */
+	 * qdisc drops packets in enqueue routine or if someone used
+	 * graft operation on the leaf since last dequeue;
+	 * simply deactivate and skip such class
+	 */
 	if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 		struct htb_class *next;
 		htb_deactivate(q, cl);
@@ -837,7 +844,8 @@ next:
 					  ptr[0]) + prio);
 		}
 		/* this used to be after charge_class but this constelation
-		   gives us slightly better performance */
+		 * gives us slightly better performance
+		 */
 		if (!cl->un.leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
@@ -847,7 +855,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
@@ -856,7 +864,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
 	if (skb != NULL) {
-		sch->flags &= ~TCQ_F_THROTTLED;
+ok:
+		qdisc_bstats_update(sch, skb);
+		qdisc_unthrottled(sch);
 		sch->q.qlen--;
 		return skb;
 	}
@@ -887,13 +897,11 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			m = ~q->row_mask[level];
 			while (m != (int)(-1)) {
 				int prio = ffz(m);
+
 				m |= 1 << prio;
 				skb = htb_dequeue_tree(q, prio, level);
-				if (likely(skb != NULL)) {
-					sch->q.qlen--;
-					sch->flags &= ~TCQ_F_THROTTLED;
-					goto fin;
-				}
+				if (likely(skb != NULL))
+					goto ok;
 			}
 		}
 		sch->qstats.overlimits++;
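Both successful dequeue paths now jump to the ok: label above, so queue-level accounting and unthrottling live in one place. Sketches of the two helpers, assuming their contemporary definitions in include/net/sch_generic.h:

	static inline void qdisc_bstats_update(struct Qdisc *sch,
					       struct sk_buff *skb)
	{
		bstats_update(&sch->bstats, skb);
	}

	static inline void qdisc_unthrottled(struct Qdisc *qdisc)
	{
		qdisc->flags &= ~TCQ_F_THROTTLED;
	}

Together with the htb_enqueue hunk above, this moves the qdisc's byte/packet counters from enqueue time to dequeue time, so they reflect traffic actually leaving the qdisc.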
@@ -994,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 
 	if (tb[TCA_HTB_INIT] == NULL) {
-		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+		pr_err("HTB: hey probably you have bad tc tool ?\n");
 		return -EINVAL;
 	}
 	gopt = nla_data(tb[TCA_HTB_INIT]);
 	if (gopt->version != HTB_VER >> 16) {
-		printk(KERN_ERR
-		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
 		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
 		return -EINVAL;
 	}
@@ -1121,8 +1128,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	if (cl->level)
 		return -EINVAL;
 	if (new == NULL &&
-	    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-				     &pfifo_qdisc_ops,
+	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 				     cl->common.classid)) == NULL)
 		return -ENOBUFS;
 
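qdisc_create_dflt() dropped its struct net_device argument upstream (the device is reachable through the dev_queue), which is why every call site in this patch loses a parameter. The new prototype, as this merge understands it:

	struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
					struct Qdisc_ops *ops, u32 parentid);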
@@ -1214,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
 	cancel_work_sync(&q->work);
 	qdisc_watchdog_cancel(&q->watchdog);
 	/* This line used to be after htb_destroy_class call below
-	   and surprisingly it worked in 2.4. But it must precede it
-	   because filter need its target class alive to be able to call
-	   unbind_filter on it (without Oops). */
+	 * and surprisingly it worked in 2.4. But it must precede it
+	 * because filter need its target class alive to be able to call
+	 * unbind_filter on it (without Oops).
+	 */
 	tcf_destroy_chain(&q->filter_list);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1247,8 +1254,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 		return -EBUSY;
 
 	if (!cl->level && htb_parent_last_child(cl)) {
-		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-					  &pfifo_qdisc_ops,
+		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 					  cl->parent->common.classid);
 		last_child = 1;
 	}
@@ -1302,14 +1308,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
-	struct nlattr *tb[TCA_HTB_RTAB + 1];
+	struct nlattr *tb[__TCA_HTB_MAX];
 	struct tc_htb_opt *hopt;
 
 	/* extract all subattrs from opt attr */
 	if (!opt)
 		goto failure;
 
-	err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
+	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
 	if (err < 0)
 		goto failure;
 
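Sizing tb[] by __TCA_HTB_MAX and parsing up to TCA_HTB_MAX keeps the lookup table in step with the attribute enum, so every attribute the policy can accept has a slot. For reference, the enum layout in include/linux/pkt_sched.h at roughly this point, give or take later additions:

	enum {
		TCA_HTB_UNSPEC,
		TCA_HTB_PARMS,
		TCA_HTB_INIT,
		TCA_HTB_CTAB,
		TCA_HTB_RTAB,
		__TCA_HTB_MAX,
	};
	#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)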
@@ -1351,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 		/* check maximal depth */
 		if (parent && parent->parent && parent->parent->level < 2) {
-			printk(KERN_ERR "htb: tree is too deep\n");
+			pr_err("htb: tree is too deep\n");
 			goto failure;
 		}
 		err = -ENOBUFS;
-		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+		if (!cl)
 			goto failure;
 
 		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1375,9 +1382,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			RB_CLEAR_NODE(&cl->node[prio]);
 
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
-		   so that can't be used inside of sch_tree_lock
-		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+		 * so that can't be used inside of sch_tree_lock
+		 * -- thanks to Karlis Peisenieks
+		 */
+		new_q = qdisc_create_dflt(sch->dev_queue,
 					  &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
@@ -1428,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	}
 
 	/* it used to be a nasty bug here, we have to check that node
-	   is really leaf before changing cl->un.leaf ! */
+	 * is really leaf before changing cl->un.leaf !
+	 */
 	if (!cl->level) {
 		cl->quantum = rtab->rate.rate / q->rate2quantum;
 		if (!hopt->quantum && cl->quantum < 1000) {
-			printk(KERN_WARNING
+			pr_warning(
 			       "HTB: quantum of class %X is small. Consider r2q change.\n",
 			       cl->common.classid);
 			cl->quantum = 1000;
 		}
 		if (!hopt->quantum && cl->quantum > 200000) {
-			printk(KERN_WARNING
+			pr_warning(
 			       "HTB: quantum of class %X is big. Consider r2q change.\n",
 			       cl->common.classid);
 			cl->quantum = 200000;
@@ -1487,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
 	struct htb_class *cl = htb_find(classid, sch);
 
 	/*if (cl && !cl->level) return 0;
-	   The line above used to be there to prevent attaching filters to
-	   leaves. But at least tc_index filter uses this just to get class
-	   for other reasons so that we have to allow for it.
-	   ----
-	   19.6.2002 As Werner explained it is ok - bind filter is just
-	   another way to "lock" the class - unlike "get" this lock can
-	   be broken by class during destroy IIUC.
+	 * The line above used to be there to prevent attaching filters to
+	 * leaves. But at least tc_index filter uses this just to get class
+	 * for other reasons so that we have to allow for it.
+	 * ----
+	 * 19.6.2002 As Werner explained it is ok - bind filter is just
+	 * another way to "lock" the class - unlike "get" this lock can
+	 * be broken by class during destroy IIUC.
 	 */
 	if (cl)
 		cl->filter_cnt++;