diff options
author | Stephen Hemminger <shemminger@osdl.org> | 2006-08-11 02:36:01 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-09-22 17:54:34 -0400 |
commit | 3696f625e2efa1f1b228b276788274e1eb86fcfa (patch) | |
tree | 2dfe12d28e7b0017fd3594bf5d1fd46cd2ff0c7e /net/sched/sch_htb.c | |
parent | 0cef296da9331e871401076b8c0688b2b31fcadd (diff) |
[HTB]: rbtree cleanup
Add code to initialize rb tree nodes, and check for double deletion.
This is not a real fix, but I can make it trap sometimes and may
be a bandaid for: http://bugzilla.kernel.org/show_bug.cgi?id=6681
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r-- | net/sched/sch_htb.c | 34 |
1 file changed, 27 insertions, 7 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index a686b9511b05..bb3ddd4784b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -366,7 +366,7 @@ static void htb_add_to_wait_tree(struct htb_sched *q, | |||
366 | * When we are past last key we return NULL. | 366 | * When we are past last key we return NULL. |
367 | * Average complexity is 2 steps per call. | 367 | * Average complexity is 2 steps per call. |
368 | */ | 368 | */ |
369 | static void htb_next_rb_node(struct rb_node **n) | 369 | static inline void htb_next_rb_node(struct rb_node **n) |
370 | { | 370 | { |
371 | *n = rb_next(*n); | 371 | *n = rb_next(*n); |
372 | } | 372 | } |
@@ -388,6 +388,18 @@ static inline void htb_add_class_to_row(struct htb_sched *q, | |||
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | /* If this triggers, it is a bug in this code, but it need not be fatal */ | ||
392 | static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root) | ||
393 | { | ||
394 | if (RB_EMPTY_NODE(rb)) { | ||
395 | WARN_ON(1); | ||
396 | } else { | ||
397 | rb_erase(rb, root); | ||
398 | RB_CLEAR_NODE(rb); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | |||
391 | /** | 403 | /** |
392 | * htb_remove_class_from_row - removes class from its row | 404 | * htb_remove_class_from_row - removes class from its row |
393 | * | 405 | * |
@@ -401,10 +413,12 @@ static inline void htb_remove_class_from_row(struct htb_sched *q, | |||
401 | 413 | ||
402 | while (mask) { | 414 | while (mask) { |
403 | int prio = ffz(~mask); | 415 | int prio = ffz(~mask); |
416 | |||
404 | mask &= ~(1 << prio); | 417 | mask &= ~(1 << prio); |
405 | if (q->ptr[cl->level][prio] == cl->node + prio) | 418 | if (q->ptr[cl->level][prio] == cl->node + prio) |
406 | htb_next_rb_node(q->ptr[cl->level] + prio); | 419 | htb_next_rb_node(q->ptr[cl->level] + prio); |
407 | rb_erase(cl->node + prio, q->row[cl->level] + prio); | 420 | |
421 | htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio); | ||
408 | if (!q->row[cl->level][prio].rb_node) | 422 | if (!q->row[cl->level][prio].rb_node) |
409 | m |= 1 << prio; | 423 | m |= 1 << prio; |
410 | } | 424 | } |
@@ -472,7 +486,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) | |||
472 | p->un.inner.ptr[prio] = NULL; | 486 | p->un.inner.ptr[prio] = NULL; |
473 | } | 487 | } |
474 | 488 | ||
475 | rb_erase(cl->node + prio, p->un.inner.feed + prio); | 489 | htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio); |
476 | 490 | ||
477 | if (!p->un.inner.feed[prio].rb_node) | 491 | if (!p->un.inner.feed[prio].rb_node) |
478 | mask |= 1 << prio; | 492 | mask |= 1 << prio; |
@@ -739,7 +753,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
739 | htb_change_class_mode(q, cl, &diff); | 753 | htb_change_class_mode(q, cl, &diff); |
740 | if (old_mode != cl->cmode) { | 754 | if (old_mode != cl->cmode) { |
741 | if (old_mode != HTB_CAN_SEND) | 755 | if (old_mode != HTB_CAN_SEND) |
742 | rb_erase(&cl->pq_node, q->wait_pq + cl->level); | 756 | htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); |
743 | if (cl->cmode != HTB_CAN_SEND) | 757 | if (cl->cmode != HTB_CAN_SEND) |
744 | htb_add_to_wait_tree(q, cl, diff); | 758 | htb_add_to_wait_tree(q, cl, diff); |
745 | } | 759 | } |
@@ -782,7 +796,7 @@ static long htb_do_events(struct htb_sched *q, int level) | |||
782 | if (time_after(cl->pq_key, q->jiffies)) { | 796 | if (time_after(cl->pq_key, q->jiffies)) { |
783 | return cl->pq_key - q->jiffies; | 797 | return cl->pq_key - q->jiffies; |
784 | } | 798 | } |
785 | rb_erase(p, q->wait_pq + level); | 799 | htb_safe_rb_erase(p, q->wait_pq + level); |
786 | diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer); | 800 | diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer); |
787 | htb_change_class_mode(q, cl, &diff); | 801 | htb_change_class_mode(q, cl, &diff); |
788 | if (cl->cmode != HTB_CAN_SEND) | 802 | if (cl->cmode != HTB_CAN_SEND) |
@@ -1279,7 +1293,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1279 | htb_deactivate(q, cl); | 1293 | htb_deactivate(q, cl); |
1280 | 1294 | ||
1281 | if (cl->cmode != HTB_CAN_SEND) | 1295 | if (cl->cmode != HTB_CAN_SEND) |
1282 | rb_erase(&cl->pq_node, q->wait_pq + cl->level); | 1296 | htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); |
1283 | 1297 | ||
1284 | kfree(cl); | 1298 | kfree(cl); |
1285 | } | 1299 | } |
@@ -1370,6 +1384,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1370 | 1384 | ||
1371 | if (!cl) { /* new class */ | 1385 | if (!cl) { /* new class */ |
1372 | struct Qdisc *new_q; | 1386 | struct Qdisc *new_q; |
1387 | int prio; | ||
1388 | |||
1373 | /* check for valid classid */ | 1389 | /* check for valid classid */ |
1374 | if (!classid || TC_H_MAJ(classid ^ sch->handle) | 1390 | if (!classid || TC_H_MAJ(classid ^ sch->handle) |
1375 | || htb_find(classid, sch)) | 1391 | || htb_find(classid, sch)) |
@@ -1389,6 +1405,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1389 | INIT_HLIST_NODE(&cl->hlist); | 1405 | INIT_HLIST_NODE(&cl->hlist); |
1390 | INIT_LIST_HEAD(&cl->children); | 1406 | INIT_LIST_HEAD(&cl->children); |
1391 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); | 1407 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); |
1408 | RB_CLEAR_NODE(&cl->pq_node); | ||
1409 | |||
1410 | for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) | ||
1411 | RB_CLEAR_NODE(&cl->node[prio]); | ||
1392 | 1412 | ||
1393 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) | 1413 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) |
1394 | so that can't be used inside of sch_tree_lock | 1414 | so that can't be used inside of sch_tree_lock |
@@ -1404,7 +1424,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1404 | 1424 | ||
1405 | /* remove from evt list because of level change */ | 1425 | /* remove from evt list because of level change */ |
1406 | if (parent->cmode != HTB_CAN_SEND) { | 1426 | if (parent->cmode != HTB_CAN_SEND) { |
1407 | rb_erase(&parent->pq_node, q->wait_pq); | 1427 | htb_safe_rb_erase(&parent->pq_node, q->wait_pq); |
1408 | parent->cmode = HTB_CAN_SEND; | 1428 | parent->cmode = HTB_CAN_SEND; |
1409 | } | 1429 | } |
1410 | parent->level = (parent->parent ? parent->parent->level | 1430 | parent->level = (parent->parent ? parent->parent->level |