author    David S. Miller <davem@davemloft.net>  2012-09-15 11:43:53 -0400
committer David S. Miller <davem@davemloft.net>  2012-09-15 11:43:53 -0400
commit    b48b63a1f6e26b0dec2c9f1690396ed4bcb66903 (patch)
tree      8d9ad227c3a7d35cd78d40ecaf9bf59375dbd21a /net/sched
parent    7f2e6a5d8608d0353b017a0fe15502307593734e (diff)
parent    3f0c3c8fe30c725c1264fb6db8cc4b69db3a658a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
net/netfilter/nfnetlink_log.c
net/netfilter/xt_LOG.c
Rather easy conflict resolution: the 'net' tree had bug fixes to make
sure we check whether a socket is a time-wait one, and elide the
logging code if so.
On the 'net-next' side, we calculate the UID and GID from the creds
using different interfaces, due to the user namespace changes from
Eric Biederman.
Signed-off-by: David S. Miller <davem@davemloft.net>
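For reference, the merged logging path in net/netfilter/xt_LOG.c combines
both sides: the time-wait check from 'net' guards the socket dereference,
while the UID/GID come out of the socket creds via the kuid/kgid mapping
helpers from 'net-next'. A rough sketch of the merged shape (not the
verbatim resolution; details such as the exact placement of the time-wait
test may differ):

	if ((logflags & XT_LOG_UID) && !iphoff && skb->sk &&
	    skb->sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			const struct cred *cred =
				skb->sk->sk_socket->file->f_cred;
			/* map the namespaced kuid_t/kgid_t back into the
			 * initial user namespace for the log line */
			sb_add(m, "UID=%u GID=%u ",
			       from_kuid_munged(&init_user_ns, cred->fsuid),
			       from_kgid_munged(&init_user_ns, cred->fsgid));
		}
		read_unlock_bh(&skb->sk->sk_callback_lock);
	}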
Diffstat (limited to 'net/sched')
 net/sched/sch_cbq.c      |  5 +++--
 net/sched/sch_fq_codel.c |  2 +-
 net/sched/sch_gred.c     | 38 ++++++++++++++++++++++----------------
 3 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6aabd77d1cfd..564b9fc8efd3 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
 			cl = defmap[TC_PRIO_BESTEFFORT];
 
-		if (cl == NULL || cl->level >= head->level)
+		if (cl == NULL)
 			goto fallback;
 	}
-
+	if (cl->level >= head->level)
+		goto fallback;
 #ifdef CONFIG_NET_CLS_ACT
 	switch (result) {
 	case TC_ACT_QUEUED:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 9fc1c62ec80e..4e606fcb2534 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (list_empty(&flow->flowchain)) {
 		list_add_tail(&flow->flowchain, &q->new_flows);
-		codel_vars_init(&flow->cvars);
 		q->new_flow_count++;
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 			struct fq_codel_flow *flow = q->flows + i;
 
 			INIT_LIST_HEAD(&flow->flowchain);
+			codel_vars_init(&flow->cvars);
 		}
 	}
 	if (sch->limit >= 1)
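The fq_codel hunks move codel_vars_init() from the enqueue path to qdisc
init: previously, every time an empty flow re-entered new_flows its CoDel
state (count, drop_next, and friends) was wiped, so the algorithm forgot
its drop-rate history across brief idle periods. Initializing once at
setup preserves that state. The helper itself is trivial; from
include/net/codel.h:

	static void codel_vars_init(struct codel_vars *vars)
	{
		memset(vars, 0, sizeof(*vars));
	}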
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index e901583e4ea5..d42234c0f13b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch)
 		if (q == NULL)
 			continue;
 
-		for (n = 0; n < table->DPs; n++)
-			if (table->tab[n] && table->tab[n] != q &&
-			    table->tab[n]->prio == q->prio)
+		for (n = i + 1; n < table->DPs; n++)
+			if (table->tab[n] && table->tab[n]->prio == q->prio)
 				return 1;
 	}
 
@@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table,
 					struct gred_sched_data *q)
 {
 	table->wred_set.qavg = q->vars.qavg;
+	table->wred_set.qidlestart = q->vars.qidlestart;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
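Storing qidlestart alongside qavg matters because the shared WRED state is
shuttled in and out of the active virtual queue; the load-side counterpart
is expected to mirror this (a sketch assuming the post-merge shape):

	static inline void gred_load_wred_set(const struct gred_sched *table,
					      struct gred_sched_data *q)
	{
		q->vars.qavg = table->wred_set.qavg;
		q->vars.qidlestart = table->wred_set.qidlestart;
	}

Without copying qidlestart in both directions, an idle period started on
one virtual queue would be invisible to the next one loaded.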
@@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
 
-	/* sum up all the qaves of prios <= to ours to get the new qave */
+	/* sum up all the qaves of prios < ours to get the new qave */
 	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
 		int i;
 
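The comment now matches the strict comparison the loop body has always
used; only virtual queues with a numerically smaller prio contribute to
the combined average. Roughly, the loop under this comment reads (a
sketch of the post-merge code):

	for (i = 0; i < t->DPs; i++) {
		if (t->tab[i] && t->tab[i]->prio < q->prio &&
		    !red_is_idling(&t->tab[i]->vars))
			qavg += t->tab[i]->vars.qavg;
	}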
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 		} else {
 			q->backlog -= qdisc_pkt_len(skb);
 
-			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->vars);
+			if (gred_wred_mode(t)) {
+				if (!sch->qstats.backlog)
+					red_start_of_idle_period(&t->wred_set);
+			} else {
+				if (!q->backlog)
+					red_start_of_idle_period(&q->vars);
+			}
 		}
 
 		return skb;
 	}
 
-	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-		red_start_of_idle_period(&t->wred_set);
-
 	return NULL;
 }
 
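The restructured branch ties idle-period accounting to the operating mode:
in WRED mode all virtual queues share one set of RED variables, so idling
begins only when the whole qdisc drains (sch->qstats.backlog hits zero);
otherwise each virtual queue tracks its own backlog. Starting an idle
period only records a timestamp that the averaging code later uses to
decay qavg; per include/net/red.h (clock details shown for illustration
and may differ by kernel version):

	static inline void red_start_of_idle_period(struct red_vars *v)
	{
		v->qidlestart = ktime_get();	/* averaging decays from here */
	}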
@@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch)
 			q->backlog -= len;
 			q->stats.other++;
 
-			if (!q->backlog && !gred_wred_mode(t))
-				red_start_of_idle_period(&q->vars);
+			if (gred_wred_mode(t)) {
+				if (!sch->qstats.backlog)
+					red_start_of_idle_period(&t->wred_set);
+			} else {
+				if (!q->backlog)
+					red_start_of_idle_period(&q->vars);
+			}
 		}
 
 		qdisc_drop(skb, sch);
 		return len;
 	}
 
-	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-		red_start_of_idle_period(&t->wred_set);
-
 	return 0;
-
 }
 
 static void gred_reset(struct Qdisc *sch)
@@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 	for (i = 0; i < MAX_DPs; i++) {
 		struct gred_sched_data *q = table->tab[i];
 		struct tc_gred_qopt opt;
+		unsigned long qavg;
 
 		memset(&opt, 0, sizeof(opt));
 
@@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		if (gred_wred_mode(table))
 			gred_load_wred_set(table, q);
 
-		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
+		qavg = red_calc_qavg(&q->parms, &q->vars,
+				     q->vars.qavg >> q->parms.Wlog);
+		opt.qave = qavg >> q->parms.Wlog;
 
 append_opt:
 		if (nla_append(skb, sizeof(opt), &opt) < 0)
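This last hunk untangles RED's fixed-point convention: q->vars.qavg is
stored left-shifted by Wlog for fractional precision, while
red_calc_qavg() takes an unshifted backlog value and returns a shifted
average. The old code fed the shifted value straight in and reported the
shifted result, double-scaling the dumped average. The same two lines,
annotated (comments added for illustration):

	/* qavg is kept scaled by 2^Wlog internally; shift down on the
	 * way into red_calc_qavg() and again before reporting: */
	qavg = red_calc_qavg(&q->parms, &q->vars,
			     q->vars.qavg >> q->parms.Wlog);
	opt.qave = qavg >> q->parms.Wlog;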