author    Patrick McHardy <kaber@trash.net>            2007-03-16 04:22:39 -0400
committer David S. Miller <davem@sunset.davemloft.net> 2007-04-26 01:26:13 -0400
commit    fb983d4578e238b7f483b4f8f39f3a0f35d34d16 (patch)
tree      b168e567f24db119b2bee301036e08dc6289743a /net/sched/sch_htb.c
parent    1a13cb63d679da328cfa339c89b8b2d0eba3b81e (diff)
[NET_SCHED]: sch_htb: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
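The conversion follows the pattern of the new generic watchdog helpers, all three of which appear in the hunks below: qdisc_watchdog_init() in htb_init(), qdisc_watchdog_schedule() in htb_dequeue(), and qdisc_watchdog_cancel() in htb_reset()/htb_destroy(). A minimal sketch of that lifecycle; the "toy" struct and functions around the three real helpers are illustrative, not taken from this patch:

#include <net/pkt_sched.h>

/* Hypothetical qdisc private data; only the watchdog member mirrors
 * what this patch adds to struct htb_sched. */
struct toy_sched {
	struct qdisc_watchdog watchdog;	/* replaces struct timer_list timer */
};

static int toy_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct toy_sched *q = qdisc_priv(sch);

	/* Bind the hrtimer-backed watchdog to this qdisc once at init. */
	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *toy_dequeue(struct Qdisc *sch)
{
	struct toy_sched *q = qdisc_priv(sch);
	psched_time_t now, next_event;

	PSCHED_GET_TIME(now);
	/* Nothing may be sent yet: compute the absolute psched_time_t at
	 * which a packet becomes eligible (placeholder value here)... */
	next_event = now + PSCHED_TICKS_PER_SEC / 10;
	/* ...then throttle until the hrtimer fires at that instant. */
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
	return NULL;
}

static void toy_reset(struct Qdisc *sch)
{
	struct toy_sched *q = qdisc_priv(sch);

	/* Cancels the pending hrtimer and un-throttles the qdisc. */
	qdisc_watchdog_cancel(&q->watchdog);
}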
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--  net/sched/sch_htb.c  91
1 file changed, 31 insertions(+), 60 deletions(-)
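Two intertwined changes run through the diff: the private timer becomes a qdisc_watchdog, and every time value HTB keeps (cl->pq_key, q->near_ev_cache[], the htb_do_events() return value) switches from jiffies-resolution unsigned long to psched_time_t. Because psched_time_t is a 64-bit clock that will not wrap in practice, the wraparound-safe time_after()/time_after_eq() macros reduce to ordinary comparison operators, and absolute event times can be returned instead of relative delays. Schematically, with both halves condensed from the htb_add_to_wait_tree() hunk below:

/* Before: relative jiffies arithmetic, wrap-safe comparison macros. */
cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
if (time_after_eq(cl->pq_key, c->pq_key))
	p = &parent->rb_right;

/* After: absolute 64-bit psched_time_t, plain operators suffice. */
cl->pq_key = q->now + delay;
if (cl->pq_key >= c->pq_key)
	p = &parent->rb_right;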
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3c3294d01041..4d84200f097b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -128,7 +128,7 @@ struct htb_class {
 	} un;
 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 	struct rb_node pq_node;	/* node for event queue */
-	unsigned long pq_key;	/* the same type as jiffies global */
+	psched_time_t pq_key;
 
 	int prio_activity;	/* for which prios are we active */
 	enum htb_cmode cmode;	/* current mode of the class */
@@ -179,10 +179,7 @@ struct htb_sched {
 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
 	/* time of nearest event per level (row) */
-	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
-	/* cached value of jiffies in dequeue */
-	unsigned long jiffies;
+	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
 
 	/* whether we hit non-work conserving class during this dequeue; we use */
 	int nwc_hit;		/* this to disable mindelay complaint in dequeue */
@@ -195,7 +192,7 @@ struct htb_sched {
 
 	int rate2quantum;	/* quant = rate / rate2quantum */
 	psched_time_t now;	/* cached dequeue time */
-	struct timer_list timer;	/* send delay timer */
+	struct qdisc_watchdog watchdog;
 #ifdef HTB_RATECM
 	struct timer_list rttim;	/* rate computer timer */
 	int recmp_bucket;	/* which hash bucket to recompute next */
@@ -342,19 +339,19 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
 {
 	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
 
-	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
-	if (cl->pq_key == q->jiffies)
+	cl->pq_key = q->now + delay;
+	if (cl->pq_key == q->now)
 		cl->pq_key++;
 
 	/* update the nearest event cache */
-	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
+	if (q->near_ev_cache[cl->level] > cl->pq_key)
 		q->near_ev_cache[cl->level] = cl->pq_key;
 
 	while (*p) {
 		struct htb_class *c;
 		parent = *p;
 		c = rb_entry(parent, struct htb_class, pq_node);
-		if (time_after_eq(cl->pq_key, c->pq_key))
+		if (cl->pq_key >= c->pq_key)
 			p = &parent->rb_right;
 		else
 			p = &parent->rb_left;
@@ -679,14 +676,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-static void htb_timer(unsigned long arg)
-{
-	struct Qdisc *sch = (struct Qdisc *)arg;
-	sch->flags &= ~TCQ_F_THROTTLED;
-	wmb();
-	netif_schedule(sch->dev);
-}
-
 #ifdef HTB_RATECM
 #define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
 static void htb_rate_timer(unsigned long arg)
@@ -778,11 +767,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 /**
  * htb_do_events - make mode changes to classes at the level
  *
- * Scans event queue for pending events and applies them. Returns jiffies to
+ * Scans event queue for pending events and applies them. Returns time of
  * next pending event (0 for no event in pq).
- * Note: Aplied are events whose have cl->pq_key <= jiffies.
+ * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static long htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
 	int i;
 
@@ -795,9 +784,9 @@ static long htb_do_events(struct htb_sched *q, int level)
 			return 0;
 
 		cl = rb_entry(p, struct htb_class, pq_node);
-		if (time_after(cl->pq_key, q->jiffies)) {
-			return cl->pq_key - q->jiffies;
-		}
+		if (cl->pq_key > q->now)
+			return cl->pq_key;
+
 		htb_safe_rb_erase(p, q->wait_pq + level);
 		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
 		htb_change_class_mode(q, cl, &diff);
@@ -806,7 +795,7 @@ static long htb_do_events(struct htb_sched *q, int level)
 	}
 	if (net_ratelimit())
 		printk(KERN_WARNING "htb: too many events !\n");
-	return HZ / 10;
+	return q->now + PSCHED_TICKS_PER_SEC / 10;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -958,30 +947,12 @@ next:
 	return skb;
 }
 
-static void htb_delay_by(struct Qdisc *sch, long delay)
-{
-	struct htb_sched *q = qdisc_priv(sch);
-	if (delay <= 0)
-		delay = 1;
-	if (unlikely(delay > 5 * HZ)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
-		delay = 5 * HZ;
-	}
-	/* why don't use jiffies here ? because expires can be in past */
-	mod_timer(&q->timer, q->jiffies + delay);
-	sch->flags |= TCQ_F_THROTTLED;
-	sch->qstats.overlimits++;
-}
-
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb = NULL;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
-	long min_delay;
-
-	q->jiffies = jiffies;
+	psched_time_t next_event;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
@@ -995,21 +966,23 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 		goto fin;
 	PSCHED_GET_TIME(q->now);
 
-	min_delay = LONG_MAX;
+	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 	q->nwc_hit = 0;
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
-		long delay;
-		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
-			delay = htb_do_events(q, level);
-			q->near_ev_cache[level] =
-			    q->jiffies + (delay ? delay : HZ);
+		psched_time_t event;
+
+		if (q->now >= q->near_ev_cache[level]) {
+			event = htb_do_events(q, level);
+			q->near_ev_cache[level] = event ? event :
+							  PSCHED_TICKS_PER_SEC;
 		} else
-			delay = q->near_ev_cache[level] - q->jiffies;
+			event = q->near_ev_cache[level];
+
+		if (event && next_event > event)
+			next_event = event;
 
-		if (delay && min_delay > delay)
-			min_delay = delay;
 		m = ~q->row_mask[level];
 		while (m != (int)(-1)) {
 			int prio = ffz(m);
@@ -1022,7 +995,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			}
 		}
 	}
-	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
+	sch->qstats.overlimits++;
+	qdisc_watchdog_schedule(&q->watchdog, next_event);
 fin:
 	return skb;
 }
@@ -1075,8 +1049,7 @@ static void htb_reset(struct Qdisc *sch)
 
 		}
 	}
-	sch->flags &= ~TCQ_F_THROTTLED;
-	del_timer(&q->timer);
+	qdisc_watchdog_cancel(&q->watchdog);
 	__skb_queue_purge(&q->direct_queue);
 	sch->q.qlen = 0;
 	memset(q->row, 0, sizeof(q->row));
@@ -1113,14 +1086,12 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 
-	init_timer(&q->timer);
+	qdisc_watchdog_init(&q->watchdog, sch);
 	skb_queue_head_init(&q->direct_queue);
 
 	q->direct_qlen = sch->dev->tx_queue_len;
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
-	q->timer.function = htb_timer;
-	q->timer.data = (unsigned long)sch;
 
 #ifdef HTB_RATECM
 	init_timer(&q->rttim);
@@ -1341,7 +1312,7 @@ static void htb_destroy(struct Qdisc *sch)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 
-	del_timer_sync(&q->timer);
+	qdisc_watchdog_cancel(&q->watchdog);
 #ifdef HTB_RATECM
 	del_timer_sync(&q->rttim);
 #endif
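The deleted htb_timer() does not disappear so much as move: the generic watchdog's hrtimer callback performs the same unthrottle-and-reschedule step for every qdisc. Roughly, assuming the qdisc_watchdog implementation introduced by the companion [NET_SCHED] patch (not part of this diff):

/* Sketch of the watchdog's hrtimer callback (assumed from the companion
 * patch that added struct qdisc_watchdog); compare it with the
 * htb_timer() body removed above. */
static enum hrtimer_restart qdisc_watchdog_fn(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer,
						 struct qdisc_watchdog, timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;	/* was: sch->flags &= ~...; */
	smp_wmb();				/* was: wmb(); */
	netif_schedule(wd->qdisc->dev);		/* was: netif_schedule(sch->dev); */

	return HRTIMER_NORESTART;
}

The practical gain is resolution: with HZ=100 or HZ=250, a jiffies timer quantizes HTB's rate-limiting delays to 10 or 4 ms, while the hrtimer fires at the exact psched_time_t of the next pending event computed in htb_dequeue().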