aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorThomas Graf <tgraf@suug.ch>2005-11-05 15:14:28 -0500
committerThomas Graf <tgr@axs.localdomain>2005-11-05 16:02:29 -0500
commitbdc450a0bb1d48144ced1f899cc8366ec8e85024 (patch)
tree77924b88ae2f9ddc702288e439756800a02988ab /net
parentb38c7eef7e536d12051cc3d5864032f2f907cdfe (diff)
[PKT_SCHED]: (G)RED: Introduce hard dropping
Introduces a new flag TC_RED_HARDDROP which specifies that if ECN marking is enabled packets should still be dropped once the average queue length exceeds the maximum threshold.

This _may_ help to avoid global synchronisation during small bursts of peers advertising but not caring about ECN. Use this option very carefully, it does more harm than good if (qth_max - qth_min) does not cover at least two average burst cycles.

The difference to the current behaviour, in which we'd run into the hard queue limit, is that due to the low pass filter of RED short bursts are less likely to cause a global synchronisation.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Diffstat (limited to 'net')
-rw-r--r--net/sched/sch_gred.c8
-rw-r--r--net/sched/sch_red.c8
2 files changed, 14 insertions, 2 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 079b0a4ea1c2..29a2dd9f3029 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -146,6 +146,11 @@ static inline int gred_use_ecn(struct gred_sched *t)
 	return t->red_flags & TC_RED_ECN;
 }
 
+static inline int gred_use_harddrop(struct gred_sched *t)
+{
+	return t->red_flags & TC_RED_HARDDROP;
+}
+
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched_data *q=NULL;
@@ -214,7 +219,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	case RED_HARD_MARK:
 		sch->qstats.overlimits++;
-		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
 		}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0d89dee751a9..dccfa44c2d71 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -51,6 +51,11 @@ static inline int red_use_ecn(struct red_sched_data *q)
 	return q->flags & TC_RED_ECN;
 }
 
+static inline int red_use_harddrop(struct red_sched_data *q)
+{
+	return q->flags & TC_RED_HARDDROP;
+}
+
 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -76,7 +81,8 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	case RED_HARD_MARK:
 		sch->qstats.overlimits++;
-		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+		if (red_use_harddrop(q) || !red_use_ecn(q) ||
+		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
 		}