author		Eric Dumazet <eric.dumazet@gmail.com>	2012-01-04 21:25:16 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-05 14:01:21 -0500
commit		eeca6688d6599c28bc449a45facb67d7f203be74 (patch)
tree		5cabbf24a3c1ee2d7757c873ba6449296a8ef7b7 /net/sched/sch_gred.c
parent		18cb809850fb499ad9bf288696a95f4071f73931 (diff)
net_sched: red: split red_parms into parms and vars
This patch splits the red_parms structure into two components: one holding
the RED 'constant' parameters, and one containing the variables.

This permits a size reduction of the GRED qdisc, and is a preliminary step
to add an optional RED unit to SFQ. SFQRED will have a single red_parms
structure shared by all flows, and a private red_vars per flow.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Dave Taht <dave.taht@gmail.com>
CC: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
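For illustration, a minimal sketch of the layout this split enables (hypothetical code, not the actual include/net/red.h or SFQ definitions; field lists are abridged and the flow count is made up):

	#include <linux/types.h>

	/* Sketch: the 'constant' RED configuration, filled by red_set_parms(). */
	struct red_parms {
		u32	qth_min;	/* scaled minimum threshold */
		u32	qth_max;	/* scaled maximum threshold */
		u32	max_P;		/* marking probability */
		u8	Wlog, Plog, Scell_log;
		/* ... */
	};

	/* Sketch: the per-queue state, reset by red_set_vars(). */
	struct red_vars {
		unsigned long	qavg;	/* EWMA of the queue length */
		/* qidlestart and the other per-queue variables ... */
	};

	/* In GRED the table-wide wred_set shrinks from a full red_parms to
	 * just a red_vars (see the sch_gred.c hunk below), and a future
	 * SFQRED can share a single parms while keeping a small vars per
	 * flow (layout below is purely illustrative). */
	struct sfqred_sketch {
		struct red_parms parms;		/* one copy, shared by all flows */
		struct red_vars	 vars[128];	/* hypothetical per-flow state */
	};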
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--	net/sched/sch_gred.c	45
1 files changed, 24 insertions, 21 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 53204de71c39..0b15236be7b6 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -41,6 +41,7 @@ struct gred_sched_data {
 	u8		prio;		/* the prio of this vq */
 
 	struct red_parms parms;
+	struct red_vars vars;
 	struct red_stats stats;
 };
 
@@ -55,7 +56,7 @@ struct gred_sched {
 	u32		red_flags;
 	u32		DPs;
 	u32		def;
-	struct red_parms wred_set;
+	struct red_vars wred_set;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -125,17 +126,17 @@ static inline u16 tc_index_to_dp(struct sk_buff *skb)
 	return skb->tc_index & GRED_VQ_MASK;
 }
 
-static inline void gred_load_wred_set(struct gred_sched *table,
+static inline void gred_load_wred_set(const struct gred_sched *table,
 				      struct gred_sched_data *q)
 {
-	q->parms.qavg = table->wred_set.qavg;
-	q->parms.qidlestart = table->wred_set.qidlestart;
+	q->vars.qavg = table->wred_set.qavg;
+	q->vars.qidlestart = table->wred_set.qidlestart;
 }
 
 static inline void gred_store_wred_set(struct gred_sched *table,
 				       struct gred_sched_data *q)
 {
-	table->wred_set.qavg = q->parms.qavg;
+	table->wred_set.qavg = q->vars.qavg;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -170,7 +171,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			goto drop;
 		}
 
-		/* fix tc_index? --could be controvesial but needed for
+		/* fix tc_index? --could be controversial but needed for
 		   requeueing */
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
@@ -181,8 +182,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
-			    !red_is_idling(&t->tab[i]->parms))
-				qavg += t->tab[i]->parms.qavg;
+			    !red_is_idling(&t->tab[i]->vars))
+				qavg += t->tab[i]->vars.qavg;
 		}
 
 	}
@@ -193,15 +194,17 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
 
-	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
+	q->vars.qavg = red_calc_qavg(&q->parms,
+				     &q->vars,
+				     gred_backlog(t, q, sch));
 
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	if (gred_wred_mode(t))
 		gred_store_wred_set(t, q);
 
-	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
 	case RED_DONT_MARK:
 		break;
 
@@ -260,7 +263,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 		q->backlog -= qdisc_pkt_len(skb);
 
 		if (!q->backlog && !gred_wred_mode(t))
-			red_start_of_idle_period(&q->parms);
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return skb;
@@ -293,7 +296,7 @@ static unsigned int gred_drop(struct Qdisc *sch)
 			q->stats.other++;
 
 			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->parms);
+				red_start_of_idle_period(&q->vars);
 		}
 
 		qdisc_drop(skb, sch);
@@ -320,7 +323,7 @@ static void gred_reset(struct Qdisc *sch)
 		if (!q)
 			continue;
 
-		red_restart(&q->parms);
+		red_restart(&q->vars);
 		q->backlog = 0;
 	}
 }
@@ -398,12 +401,12 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	q->limit = ctl->limit;
 
 	if (q->backlog == 0)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	red_set_parms(&q->parms,
 		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
 		      ctl->Scell_log, stab, max_P);
-
+	red_set_vars(&q->vars);
 	return 0;
 }
 
@@ -563,12 +566,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.bytesin	= q->bytesin;
 
 		if (gred_wred_mode(table)) {
-			q->parms.qidlestart =
-				table->tab[table->def]->parms.qidlestart;
-			q->parms.qavg = table->tab[table->def]->parms.qavg;
+			q->vars.qidlestart =
+				table->tab[table->def]->vars.qidlestart;
+			q->vars.qavg = table->tab[table->def]->vars.qavg;
 		}
 
-		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
 append_opt:
 		if (nla_append(skb, sizeof(opt), &opt) < 0)