author    Eric Dumazet <edumazet@google.com>    2013-06-04 03:11:48 -0400
committer David S. Miller <davem@davemloft.net>    2013-06-04 20:44:07 -0400
commit    5343a7f8be11951cb3095b91e8e4eb506cfacc0f (patch)
tree      d114dd1783cb9bd7b9330ee78ff90bda4241a754 /net/sched/sch_htb.c
parent    5e71d9d77c07fa7d4c42287a177f7b738d0cd4b9 (diff)
net_sched: htb: do not mix 1ns and 64ns time units

commit 56b765b79 ("htb: improved accuracy at high rates") added another
regression for low rates, because it mixes 1ns and 64ns time units.

So the maximum delay (mbuffer) was not 60 seconds, but 937 ms.

Let's convert all time fields to 1ns, as 64bit arches are becoming the
norm.

Reported-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
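[Editorial note] The 937 ms figure is just the 64x unit mismatch: a PSCHED
tick is 64 ns, so 60 s expressed in ticks is 60 * (10^9 / 64) = 937,500,000,
and a code path that compares that tick count against 1 ns timestamps
effectively treats it as ~937.5 ms. A minimal user-space sketch of the
arithmetic (illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC         1000000000ULL
#define PSCHED_TICKS_PER_SEC (NSEC_PER_SEC >> 6) /* 64 ns ticks: 15,625,000/s */

int main(void)
{
	/* Pre-patch: mbuffer was computed in 64 ns ticks... */
	uint64_t mbuffer_ticks = 60ULL * PSCHED_TICKS_PER_SEC; /* 937,500,000 */

	/* ...but compared against 1 ns timestamps, so it behaved as ns: */
	printf("intended max wait:  60 s\n");
	printf("effective max wait: %.1f ms\n",
	       mbuffer_ticks / 1e6); /* ~937.5 ms */
	return 0;
}

The point is only that a 64 ns tick count read as nanoseconds shrinks the
wait horizon by a factor of 64.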
Diffstat (limited to 'net/sched/sch_htb.c')
 net/sched/sch_htb.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f87fb850b7ef..adaedd79389c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -109,7 +109,7 @@ struct htb_class {
 	} un;
 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 	struct rb_node pq_node;	/* node for event queue */
-	psched_time_t pq_key;
+	s64	pq_key;
 
 	int prio_activity;	/* for which prios are we active */
 	enum htb_cmode cmode;	/* current mode of the class */
@@ -121,10 +121,10 @@ struct htb_class {
 	/* token bucket parameters */
 	struct psched_ratecfg rate;
 	struct psched_ratecfg ceil;
 	s64 buffer, cbuffer;	/* token bucket depth/rate */
-	psched_tdiff_t mbuffer;	/* max wait time */
+	s64 mbuffer;		/* max wait time */
 	s64 tokens, ctokens;	/* current number of tokens */
-	psched_time_t t_c;	/* checkpoint time */
+	s64 t_c;		/* checkpoint time */
 };
 
 struct htb_sched {
@@ -141,15 +141,15 @@ struct htb_sched {
 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
 	/* time of nearest event per level (row) */
-	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+	s64	near_ev_cache[TC_HTB_MAXDEPTH];
 
 	int defcls;		/* class where unclassified flows go to */
 
 	/* filters for qdisc itself */
 	struct tcf_proto *filter_list;
 
 	int rate2quantum;	/* quant = rate / rate2quantum */
-	psched_time_t now;	/* cached dequeue time */
+	s64	now;		/* cached dequeue time */
 	struct qdisc_watchdog watchdog;
 
 	/* non shaped skbs; let them go directly thru */
@@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq, q->now for too many events).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
-				   unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+			 unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
 	 * 1 to simplify things when jiffy is going to be incremented
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
-	psched_time_t next_event;
+	s64 next_event;
 	unsigned long start_at;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ ok:
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
-		psched_time_t event;
+		s64 event;
 
 		if (q->now >= q->near_ev_cache[level]) {
 			event = htb_do_events(q, level, start_at);
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
 	if (!cl->level && cl->un.leaf.q)
 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
-	cl->xstats.tokens = cl->tokens;
-	cl->xstats.ctokens = cl->ctokens;
+	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
-	parent->t_c = psched_get_time();
+	parent->t_c = ktime_to_ns(ktime_get());
 	parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	/* set class to be in HTB_CAN_SEND state */
 	cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
 	cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
-	cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
-	cl->t_c = psched_get_time();
+	cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
+	cl->t_c = ktime_to_ns(ktime_get());
 	cl->cmode = HTB_CAN_SEND;
 
 	/* attach to the hash list and parent's family */
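
[Editorial note] The PSCHED_TICKS2NS()/PSCHED_NS2TICKS() calls the patch
keeps at the netlink boundary are simple shifts (PSCHED_SHIFT is 6 in this
era's include/net/pkt_sched.h, i.e. one tick = 64 ns), so userspace keeps
talking 64 ns ticks while the qdisc's internal state is now pure
nanoseconds. A small stand-alone sketch of the round trip (the 156250-tick
input is an arbitrary illustration, about 10 ms):

#include <stdio.h>
#include <stdint.h>

/* Shift-based conversions, matching include/net/pkt_sched.h */
#define PSCHED_SHIFT       6
#define PSCHED_TICKS2NS(x) ((int64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x) ((x) >> PSCHED_SHIFT)

int main(void)
{
	uint32_t buffer_ticks = 156250;  /* 10 ms worth of 64 ns ticks */
	int64_t  tokens_ns    = PSCHED_TICKS2NS(buffer_ticks);

	/* ticks -> ns on the way in (htb_change_class), ns -> ticks on
	 * the way out (htb_dump_class_stats): */
	printf("%u ticks -> %lld ns -> %lld ticks\n",
	       buffer_ticks, (long long)tokens_ns,
	       (long long)PSCHED_NS2TICKS(tokens_ns));
	return 0;
}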