author		Eric Dumazet <eric.dumazet@gmail.com>	2011-11-30 07:10:53 -0500
committer	David S. Miller <davem@davemloft.net>	2011-11-30 23:27:22 -0500
commit		ea6a5d3b97b768561db6358f15e4c84ced0f4f7e (patch)
tree		9f9e41800756d04c0edbfdeb8d8e35fb2273b6fd /include
parent		917fbdb32f37e9a93b00bb12ee83532982982df3 (diff)
sch_red: fix red_calc_qavg_from_idle_time
Since commit a4a710c4a7490587 (pkt_sched: Change PSCHED_SHIFT from 10 to
6), it seems RED/GRED are broken.

red_calc_qavg_from_idle_time() computes a delay in us units, but this
delay is now 16 times bigger than the real delay, so the final qavg
result is smaller than expected.
Use standard kernel time services since there is no need to obfuscate
them.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
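To make the factor of 16 concrete: psched ticks are nanoseconds shifted
right by PSCHED_SHIFT, so with PSCHED_SHIFT == 10 one tick was ~1.024 us
and tick counts could pass for microseconds, while with PSCHED_SHIFT == 6
one tick is 64 ns and the same reading overstates the delay by
2^(10-6) = 16. A minimal userspace sketch of the arithmetic (illustrative
only, not part of the patch):

#include <stdio.h>

int main(void)
{
	long idle_ns  = 1000 * 1000;     /* a real idle period of 1 ms            */
	long ticks_10 = idle_ns >> 10;   /* ~976: close to the true 1000 us       */
	long ticks_6  = idle_ns >> 6;    /* 15625: read as "us", 16x too large    */

	printf("shift 10: %ld, shift 6: %ld, ratio: %ld\n",
	       ticks_10, ticks_6, ticks_6 / ticks_10);   /* ratio prints 16 */
	return 0;
}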
Diffstat (limited to 'include')
-rw-r--r--	include/net/red.h	15
1 file changed, 6 insertions, 9 deletions
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3beb..b72a3b833936 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
 	u32		qR;		/* Cached random number */
 
 	unsigned long	qavg;		/* Average queue length: A scaled */
-	psched_time_t	qidlestart;	/* Start of current idle period */
+	ktime_t		qidlestart;	/* Start of current idle period */
 };
 
 static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-	return p->qidlestart != PSCHED_PASTPERFECT;
+	return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-	p->qidlestart = psched_get_time();
+	p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-	p->qidlestart = PSCHED_PASTPERFECT;
+	p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
 
 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
 {
-	psched_time_t now;
-	long us_idle;
+	s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+	long us_idle = min_t(s64, delta, p->Scell_max);
 	int shift;
 
-	now = psched_get_time();
-	us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
 	/*
 	 * The problem: ideally, average length queue recalcultion should
 	 * be done over constant clock intervals. This is too expensive, so
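For reference, the replacement pattern in isolation: ktime_us_delta()
returns the elapsed time in real microseconds, so the bound against
Scell_max no longer depends on PSCHED_SHIFT. A minimal sketch against the
patched red_parms layout (the helper name red_idle_us is invented here
for illustration):

#include <linux/ktime.h>
#include <linux/kernel.h>
#include <net/red.h>

/* Illustrative helper, not part of the patch: bounded idle time in us,
 * matching the first two lines of the new red_calc_qavg_from_idle_time().
 */
static inline long red_idle_us(const struct red_parms *p)
{
	s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);

	return min_t(s64, delta, p->Scell_max);
}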