aboutsummaryrefslogtreecommitdiffstats
path: root/include/net
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2012-01-04 21:25:16 -0500
committerDavid S. Miller <davem@davemloft.net>2012-01-05 14:01:21 -0500
commiteeca6688d6599c28bc449a45facb67d7f203be74 (patch)
tree5cabbf24a3c1ee2d7757c873ba6449296a8ef7b7 /include/net
parent18cb809850fb499ad9bf288696a95f4071f73931 (diff)
net_sched: red: split red_parms into parms and vars
This patch splits the red_parms structure into two components. One holding the RED 'constant' parameters, and one containing the variables. This permits a size reduction of GRED qdisc, and is a preliminary step to add an optional RED unit to SFQ. SFQRED will have a single red_parms structure shared by all flows, and a private red_vars per flow. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> CC: Dave Taht <dave.taht@gmail.com> CC: Stephen Hemminger <shemminger@vyatta.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
-rw-r--r--include/net/red.h98
1 file changed, 55 insertions, 43 deletions
diff --git a/include/net/red.h b/include/net/red.h
index ef715a16cce4..baab385a4736 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -137,7 +137,9 @@ struct red_parms {
137 u8 Wlog; /* log(W) */ 137 u8 Wlog; /* log(W) */
138 u8 Plog; /* random number bits */ 138 u8 Plog; /* random number bits */
139 u8 Stab[RED_STAB_SIZE]; 139 u8 Stab[RED_STAB_SIZE];
140};
140 141
142struct red_vars {
141 /* Variables */ 143 /* Variables */
142 int qcount; /* Number of packets since last random 144 int qcount; /* Number of packets since last random
143 number generation */ 145 number generation */
@@ -152,6 +154,16 @@ static inline u32 red_maxp(u8 Plog)
152 return Plog < 32 ? (~0U >> Plog) : ~0U; 154 return Plog < 32 ? (~0U >> Plog) : ~0U;
153} 155}
154 156
157static inline void red_set_vars(struct red_vars *v)
158{
159 /* Reset average queue length, the value is strictly bound
160 * to the parameters below, reseting hurts a bit but leaving
161 * it might result in an unreasonable qavg for a while. --TGR
162 */
163 v->qavg = 0;
164
165 v->qcount = -1;
166}
155 167
156static inline void red_set_parms(struct red_parms *p, 168static inline void red_set_parms(struct red_parms *p,
157 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, 169 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
@@ -160,13 +172,6 @@ static inline void red_set_parms(struct red_parms *p,
160 int delta = qth_max - qth_min; 172 int delta = qth_max - qth_min;
161 u32 max_p_delta; 173 u32 max_p_delta;
162 174
163 /* Reset average queue length, the value is strictly bound
164 * to the parameters below, reseting hurts a bit but leaving
165 * it might result in an unreasonable qavg for a while. --TGR
166 */
167 p->qavg = 0;
168
169 p->qcount = -1;
170 p->qth_min = qth_min << Wlog; 175 p->qth_min = qth_min << Wlog;
171 p->qth_max = qth_max << Wlog; 176 p->qth_max = qth_max << Wlog;
172 p->Wlog = Wlog; 177 p->Wlog = Wlog;
@@ -197,31 +202,32 @@ static inline void red_set_parms(struct red_parms *p,
197 memcpy(p->Stab, stab, sizeof(p->Stab)); 202 memcpy(p->Stab, stab, sizeof(p->Stab));
198} 203}
199 204
200static inline int red_is_idling(const struct red_parms *p) 205static inline int red_is_idling(const struct red_vars *v)
201{ 206{
202 return p->qidlestart.tv64 != 0; 207 return v->qidlestart.tv64 != 0;
203} 208}
204 209
205static inline void red_start_of_idle_period(struct red_parms *p) 210static inline void red_start_of_idle_period(struct red_vars *v)
206{ 211{
207 p->qidlestart = ktime_get(); 212 v->qidlestart = ktime_get();
208} 213}
209 214
210static inline void red_end_of_idle_period(struct red_parms *p) 215static inline void red_end_of_idle_period(struct red_vars *v)
211{ 216{
212 p->qidlestart.tv64 = 0; 217 v->qidlestart.tv64 = 0;
213} 218}
214 219
215static inline void red_restart(struct red_parms *p) 220static inline void red_restart(struct red_vars *v)
216{ 221{
217 red_end_of_idle_period(p); 222 red_end_of_idle_period(v);
218 p->qavg = 0; 223 v->qavg = 0;
219 p->qcount = -1; 224 v->qcount = -1;
220} 225}
221 226
222static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p) 227static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
228 const struct red_vars *v)
223{ 229{
224 s64 delta = ktime_us_delta(ktime_get(), p->qidlestart); 230 s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
225 long us_idle = min_t(s64, delta, p->Scell_max); 231 long us_idle = min_t(s64, delta, p->Scell_max);
226 int shift; 232 int shift;
227 233
@@ -248,7 +254,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
248 shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; 254 shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
249 255
250 if (shift) 256 if (shift)
251 return p->qavg >> shift; 257 return v->qavg >> shift;
252 else { 258 else {
253 /* Approximate initial part of exponent with linear function: 259 /* Approximate initial part of exponent with linear function:
254 * 260 *
@@ -257,16 +263,17 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
257 * Seems, it is the best solution to 263 * Seems, it is the best solution to
258 * problem of too coarse exponent tabulation. 264 * problem of too coarse exponent tabulation.
259 */ 265 */
260 us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; 266 us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
261 267
262 if (us_idle < (p->qavg >> 1)) 268 if (us_idle < (v->qavg >> 1))
263 return p->qavg - us_idle; 269 return v->qavg - us_idle;
264 else 270 else
265 return p->qavg >> 1; 271 return v->qavg >> 1;
266 } 272 }
267} 273}
268 274
269static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p, 275static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
276 const struct red_vars *v,
270 unsigned int backlog) 277 unsigned int backlog)
271{ 278{
272 /* 279 /*
@@ -278,16 +285,17 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
278 * 285 *
279 * --ANK (980924) 286 * --ANK (980924)
280 */ 287 */
281 return p->qavg + (backlog - (p->qavg >> p->Wlog)); 288 return v->qavg + (backlog - (v->qavg >> p->Wlog));
282} 289}
283 290
284static inline unsigned long red_calc_qavg(const struct red_parms *p, 291static inline unsigned long red_calc_qavg(const struct red_parms *p,
292 const struct red_vars *v,
285 unsigned int backlog) 293 unsigned int backlog)
286{ 294{
287 if (!red_is_idling(p)) 295 if (!red_is_idling(v))
288 return red_calc_qavg_no_idle_time(p, backlog); 296 return red_calc_qavg_no_idle_time(p, v, backlog);
289 else 297 else
290 return red_calc_qavg_from_idle_time(p); 298 return red_calc_qavg_from_idle_time(p, v);
291} 299}
292 300
293 301
@@ -296,7 +304,9 @@ static inline u32 red_random(const struct red_parms *p)
296 return reciprocal_divide(net_random(), p->max_P_reciprocal); 304 return reciprocal_divide(net_random(), p->max_P_reciprocal);
297} 305}
298 306
299static inline int red_mark_probability(const struct red_parms *p, unsigned long qavg) 307static inline int red_mark_probability(const struct red_parms *p,
308 const struct red_vars *v,
309 unsigned long qavg)
300{ 310{
301 /* The formula used below causes questions. 311 /* The formula used below causes questions.
302 312
@@ -314,7 +324,7 @@ static inline int red_mark_probability(const struct red_parms *p, unsigned long
314 324
315 Any questions? --ANK (980924) 325 Any questions? --ANK (980924)
316 */ 326 */
317 return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR); 327 return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
318} 328}
319 329
320enum { 330enum {
@@ -323,7 +333,7 @@ enum {
323 RED_ABOVE_MAX_TRESH, 333 RED_ABOVE_MAX_TRESH,
324}; 334};
325 335
326static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg) 336static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
327{ 337{
328 if (qavg < p->qth_min) 338 if (qavg < p->qth_min)
329 return RED_BELOW_MIN_THRESH; 339 return RED_BELOW_MIN_THRESH;
@@ -339,27 +349,29 @@ enum {
339 RED_HARD_MARK, 349 RED_HARD_MARK,
340}; 350};
341 351
342static inline int red_action(struct red_parms *p, unsigned long qavg) 352static inline int red_action(const struct red_parms *p,
353 struct red_vars *v,
354 unsigned long qavg)
343{ 355{
344 switch (red_cmp_thresh(p, qavg)) { 356 switch (red_cmp_thresh(p, qavg)) {
345 case RED_BELOW_MIN_THRESH: 357 case RED_BELOW_MIN_THRESH:
346 p->qcount = -1; 358 v->qcount = -1;
347 return RED_DONT_MARK; 359 return RED_DONT_MARK;
348 360
349 case RED_BETWEEN_TRESH: 361 case RED_BETWEEN_TRESH:
350 if (++p->qcount) { 362 if (++v->qcount) {
351 if (red_mark_probability(p, qavg)) { 363 if (red_mark_probability(p, v, qavg)) {
352 p->qcount = 0; 364 v->qcount = 0;
353 p->qR = red_random(p); 365 v->qR = red_random(p);
354 return RED_PROB_MARK; 366 return RED_PROB_MARK;
355 } 367 }
356 } else 368 } else
357 p->qR = red_random(p); 369 v->qR = red_random(p);
358 370
359 return RED_DONT_MARK; 371 return RED_DONT_MARK;
360 372
361 case RED_ABOVE_MAX_TRESH: 373 case RED_ABOVE_MAX_TRESH:
362 p->qcount = -1; 374 v->qcount = -1;
363 return RED_HARD_MARK; 375 return RED_HARD_MARK;
364 } 376 }
365 377
@@ -367,14 +379,14 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
367 return RED_DONT_MARK; 379 return RED_DONT_MARK;
368} 380}
369 381
370static inline void red_adaptative_algo(struct red_parms *p) 382static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
371{ 383{
372 unsigned long qavg; 384 unsigned long qavg;
373 u32 max_p_delta; 385 u32 max_p_delta;
374 386
375 qavg = p->qavg; 387 qavg = v->qavg;
376 if (red_is_idling(p)) 388 if (red_is_idling(v))
377 qavg = red_calc_qavg_from_idle_time(p); 389 qavg = red_calc_qavg_from_idle_time(p, v);
378 390
379 /* p->qavg is fixed point number with point at Wlog */ 391 /* p->qavg is fixed point number with point at Wlog */
380 qavg >>= p->Wlog; 392 qavg >>= p->Wlog;