diff options
| author | Jarek Poplawski <jarkao2@gmail.com> | 2009-02-01 04:13:22 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2009-02-01 04:13:22 -0500 |
| commit | 1224736d97e83367bb66e29c2bee0f570f09db3e (patch) | |
| tree | 540dcc52f54a0a440826cd66d1f43c558e5013a7 /net/sched | |
| parent | e82181de5ef4648074765912d2d82d6bd60115eb (diff) | |
pkt_sched: sch_htb: Use workqueue to schedule after too many events.
Patrick McHardy <kaber@trash.net> suggested using a workqueue instead
of hrtimers to trigger netif_schedule() when there is a problem with
setting exact time of this event: 'The difference - yeah, it shouldn't
make much, mainly wake up the qdisc earlier (but not too early) after
"too many events" occurred _and_ no further enqueue events wake up the
qdisc anyways.'
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
| -rw-r--r-- | net/sched/sch_htb.c | 26 |
1 files changed, 21 insertions, 5 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 826f92145261..355974f610c5 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
| 36 | #include <linux/compiler.h> | 36 | #include <linux/compiler.h> |
| 37 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
| 38 | #include <linux/workqueue.h> | ||
| 38 | #include <net/netlink.h> | 39 | #include <net/netlink.h> |
| 39 | #include <net/pkt_sched.h> | 40 | #include <net/pkt_sched.h> |
| 40 | 41 | ||
| @@ -156,6 +157,7 @@ struct htb_sched { | |||
| 156 | 157 | ||
| 157 | #define HTB_WARN_TOOMANYEVENTS 0x1 | 158 | #define HTB_WARN_TOOMANYEVENTS 0x1 |
| 158 | unsigned int warned; /* only one warning */ | 159 | unsigned int warned; /* only one warning */ |
| 160 | struct work_struct work; | ||
| 159 | }; | 161 | }; |
| 160 | 162 | ||
| 161 | /* find class in global hash table using given handle */ | 163 | /* find class in global hash table using given handle */ |
| @@ -659,7 +661,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
| 659 | * htb_do_events - make mode changes to classes at the level | 661 | * htb_do_events - make mode changes to classes at the level |
| 660 | * | 662 | * |
| 661 | * Scans event queue for pending events and applies them. Returns time of | 663 | * Scans event queue for pending events and applies them. Returns time of |
| 662 | * next pending event (0 for no event in pq). | 664 | * next pending event (0 for no event in pq, q->now for too many events). |
| 663 | * Note: Applied are events whose have cl->pq_key <= q->now. | 665 | * Note: Applied are events whose have cl->pq_key <= q->now. |
| 664 | */ | 666 | */ |
| 665 | static psched_time_t htb_do_events(struct htb_sched *q, int level, | 667 | static psched_time_t htb_do_events(struct htb_sched *q, int level, |
| @@ -687,12 +689,14 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
| 687 | if (cl->cmode != HTB_CAN_SEND) | 689 | if (cl->cmode != HTB_CAN_SEND) |
| 688 | htb_add_to_wait_tree(q, cl, diff); | 690 | htb_add_to_wait_tree(q, cl, diff); |
| 689 | } | 691 | } |
| 690 | /* too much load - let's continue on next jiffie (including above) */ | 692 | |
| 693 | /* too much load - let's continue after a break for scheduling */ | ||
| 691 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { | 694 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { |
| 692 | printk(KERN_WARNING "htb: too many events!\n"); | 695 | printk(KERN_WARNING "htb: too many events!\n"); |
| 693 | q->warned |= HTB_WARN_TOOMANYEVENTS; | 696 | q->warned |= HTB_WARN_TOOMANYEVENTS; |
| 694 | } | 697 | } |
| 695 | return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ; | 698 | |
| 699 | return q->now; | ||
| 696 | } | 700 | } |
| 697 | 701 | ||
| 698 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL | 702 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL |
| @@ -892,7 +896,10 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) | |||
| 892 | } | 896 | } |
| 893 | } | 897 | } |
| 894 | sch->qstats.overlimits++; | 898 | sch->qstats.overlimits++; |
| 895 | qdisc_watchdog_schedule(&q->watchdog, next_event); | 899 | if (likely(next_event > q->now)) |
| 900 | qdisc_watchdog_schedule(&q->watchdog, next_event); | ||
| 901 | else | ||
| 902 | schedule_work(&q->work); | ||
| 896 | fin: | 903 | fin: |
| 897 | return skb; | 904 | return skb; |
| 898 | } | 905 | } |
| @@ -962,6 +969,14 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { | |||
| 962 | [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, | 969 | [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, |
| 963 | }; | 970 | }; |
| 964 | 971 | ||
| 972 | static void htb_work_func(struct work_struct *work) | ||
| 973 | { | ||
| 974 | struct htb_sched *q = container_of(work, struct htb_sched, work); | ||
| 975 | struct Qdisc *sch = q->watchdog.qdisc; | ||
| 976 | |||
| 977 | __netif_schedule(qdisc_root(sch)); | ||
| 978 | } | ||
| 979 | |||
| 965 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) | 980 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) |
| 966 | { | 981 | { |
| 967 | struct htb_sched *q = qdisc_priv(sch); | 982 | struct htb_sched *q = qdisc_priv(sch); |
| @@ -996,6 +1011,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 996 | INIT_LIST_HEAD(q->drops + i); | 1011 | INIT_LIST_HEAD(q->drops + i); |
| 997 | 1012 | ||
| 998 | qdisc_watchdog_init(&q->watchdog, sch); | 1013 | qdisc_watchdog_init(&q->watchdog, sch); |
| 1014 | INIT_WORK(&q->work, htb_work_func); | ||
| 999 | skb_queue_head_init(&q->direct_queue); | 1015 | skb_queue_head_init(&q->direct_queue); |
| 1000 | 1016 | ||
| 1001 | q->direct_qlen = qdisc_dev(sch)->tx_queue_len; | 1017 | q->direct_qlen = qdisc_dev(sch)->tx_queue_len; |
| @@ -1188,7 +1204,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
| 1188 | kfree(cl); | 1204 | kfree(cl); |
| 1189 | } | 1205 | } |
| 1190 | 1206 | ||
| 1191 | /* always caled under BH & queue lock */ | ||
| 1192 | static void htb_destroy(struct Qdisc *sch) | 1207 | static void htb_destroy(struct Qdisc *sch) |
| 1193 | { | 1208 | { |
| 1194 | struct htb_sched *q = qdisc_priv(sch); | 1209 | struct htb_sched *q = qdisc_priv(sch); |
| @@ -1196,6 +1211,7 @@ static void htb_destroy(struct Qdisc *sch) | |||
| 1196 | struct htb_class *cl; | 1211 | struct htb_class *cl; |
| 1197 | unsigned int i; | 1212 | unsigned int i; |
| 1198 | 1213 | ||
| 1214 | cancel_work_sync(&q->work); | ||
| 1199 | qdisc_watchdog_cancel(&q->watchdog); | 1215 | qdisc_watchdog_cancel(&q->watchdog); |
| 1200 | /* This line used to be after htb_destroy_class call below | 1216 | /* This line used to be after htb_destroy_class call below |
| 1201 | and surprisingly it worked in 2.4. But it must precede it | 1217 | and surprisingly it worked in 2.4. But it must precede it |
