path: root/net/sched/sch_choke.c
Diffstat (limited to 'net/sched/sch_choke.c')
-rw-r--r--	net/sched/sch_choke.c	161
1 file changed, 59 insertions(+), 102 deletions(-)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3422b25df9e4..e465064d39a3 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -19,10 +19,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
+#include <net/flow_keys.h>
 
 /*
    CHOKe	stateless AQM for fair bandwidth allocation
@@ -60,6 +57,7 @@ struct choke_sched_data {
 	struct red_parms parms;
 
 /* Variables */
+	struct red_vars  vars;
 	struct tcf_proto *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
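
For context: the new struct red_vars member holds the RED working state that the companion red.h patch split out of struct red_parms. Quoting that structure from include/net/red.h as of this series (from memory, so treat the exact layout as illustrative):

	struct red_vars {
		int		qcount;		/* packets since last random
						   number generation */
		u32		qR;		/* cached random number */
		unsigned long	qavg;		/* average queue length, Wlog scaled */
		ktime_t		qidlestart;	/* start of current idle period */
	};

The point of the split is to keep the read-mostly configuration (parms) separate from the hot mutable state (vars); every hunk below renames accesses accordingly.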
@@ -142,85 +140,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
 	--sch->q.qlen;
 }
 
-/*
- * Compare flow of two packets
- * Returns true only if source and destination address and port match.
- * false for special cases
- */
-static bool choke_match_flow(struct sk_buff *skb1,
-			     struct sk_buff *skb2)
-{
-	int off1, off2, poff;
-	const u32 *ports1, *ports2;
-	u8 ip_proto;
-	__u32 hash1;
-
-	if (skb1->protocol != skb2->protocol)
-		return false;
-
-	/* Use hash value as quick check
-	 * Assumes that __skb_get_rxhash makes IP header and ports linear
-	 */
-	hash1 = skb_get_rxhash(skb1);
-	if (!hash1 || hash1 != skb_get_rxhash(skb2))
-		return false;
-
-	/* Probably match, but be sure to avoid hash collisions */
-	off1 = skb_network_offset(skb1);
-	off2 = skb_network_offset(skb2);
-
-	switch (skb1->protocol) {
-	case __constant_htons(ETH_P_IP): {
-		const struct iphdr *ip1, *ip2;
-
-		ip1 = (const struct iphdr *) (skb1->data + off1);
-		ip2 = (const struct iphdr *) (skb2->data + off2);
-
-		ip_proto = ip1->protocol;
-		if (ip_proto != ip2->protocol ||
-		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
-			return false;
-
-		if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
-			ip_proto = 0;
-		off1 += ip1->ihl * 4;
-		off2 += ip2->ihl * 4;
-		break;
-	}
-
-	case __constant_htons(ETH_P_IPV6): {
-		const struct ipv6hdr *ip1, *ip2;
-
-		ip1 = (const struct ipv6hdr *) (skb1->data + off1);
-		ip2 = (const struct ipv6hdr *) (skb2->data + off2);
-
-		ip_proto = ip1->nexthdr;
-		if (ip_proto != ip2->nexthdr ||
-		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
-		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
-			return false;
-		off1 += 40;
-		off2 += 40;
-	}
-
-	default: /* Maybe compare MAC header here? */
-		return false;
-	}
-
-	poff = proto_ports_offset(ip_proto);
-	if (poff < 0)
-		return true;
-
-	off1 += poff;
-	off2 += poff;
-
-	ports1 = (__force u32 *)(skb1->data + off1);
-	ports2 = (__force u32 *)(skb2->data + off2);
-	return *ports1 == *ports2;
-}
-
 struct choke_skb_cb {
 	u16		classid;
+	u8		keys_valid;
+	struct flow_keys keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
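
Note that embedding struct flow_keys in choke_skb_cb grows the qdisc's private cb area. The choke_skb_cb() helper visible in context guards against overflowing skb->cb; its body is not shown in this hunk, but reproduced from the surrounding file as a best-effort sketch it is essentially:

	static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
	{
		/* the enlarged private cb must still fit behind struct qdisc_skb_cb */
		BUILD_BUG_ON(sizeof(skb->cb) <
			     sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
		return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
	}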
@@ -241,6 +164,32 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 }
 
 /*
+ * Compare flow of two packets
+ * Returns true only if source and destination address and port match.
+ * false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+			     struct sk_buff *skb2)
+{
+	if (skb1->protocol != skb2->protocol)
+		return false;
+
+	if (!choke_skb_cb(skb1)->keys_valid) {
+		choke_skb_cb(skb1)->keys_valid = 1;
+		skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+	}
+
+	if (!choke_skb_cb(skb2)->keys_valid) {
+		choke_skb_cb(skb2)->keys_valid = 1;
+		skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+	}
+
+	return !memcmp(&choke_skb_cb(skb1)->keys,
+		       &choke_skb_cb(skb2)->keys,
+		       sizeof(struct flow_keys));
+}
+
+/*
  * Classify flow using either:
  *  1. pre-existing classification result in skb
  *  2. fast internal classification
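
The relocated choke_match_flow() above now delegates all header parsing to skb_flow_dissect() and compares the resulting keys with memcmp(), replacing the hand-rolled IPv4/IPv6 walker removed earlier. For reference, the structure skb_flow_dissect() fills in (include/net/flow_keys.h of this era, quoted from memory and therefore illustrative only) is:

	struct flow_keys {
		/* (src, dst) must be grouped, in the same way as in the IP header */
		__be32 src;
		__be32 dst;
		union {
			__be32 ports;
			__be16 port16[2];
		};
		u8 ip_proto;
	};

Because the dissected keys are cached in the skb's cb and flagged by keys_valid, a packet that survives several random CHOKe comparisons is dissected only once.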
@@ -317,7 +266,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
-	struct red_parms *p = &q->parms;
+	const struct red_parms *p = &q->parms;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
 	if (q->filter_list) {
@@ -326,14 +275,15 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			goto other_drop;	/* Packet was eaten by filter */
 	}
 
+	choke_skb_cb(skb)->keys_valid = 0;
 	/* Compute average queue usage (see RED) */
-	p->qavg = red_calc_qavg(p, sch->q.qlen);
-	if (red_is_idling(p))
-		red_end_of_idle_period(p);
+	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+	if (red_is_idling(&q->vars))
+		red_end_of_idle_period(&q->vars);
 
 	/* Is queue small? */
-	if (p->qavg <= p->qth_min)
-		p->qcount = -1;
+	if (q->vars.qavg <= p->qth_min)
+		q->vars.qcount = -1;
 	else {
 		unsigned int idx;
 
@@ -345,8 +295,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 
 		/* Queue is large, always mark/drop */
-		if (p->qavg > p->qth_max) {
-			p->qcount = -1;
+		if (q->vars.qavg > p->qth_max) {
+			q->vars.qcount = -1;
 
 			sch->qstats.overlimits++;
 			if (use_harddrop(q) || !use_ecn(q) ||
@@ -356,10 +306,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			}
 
 			q->stats.forced_mark++;
-		} else if (++p->qcount) {
-			if (red_mark_probability(p, p->qavg)) {
-				p->qcount = 0;
-				p->qR = red_random(p);
+		} else if (++q->vars.qcount) {
+			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+				q->vars.qcount = 0;
+				q->vars.qR = red_random(p);
 
 				sch->qstats.overlimits++;
 				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -370,7 +320,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 				q->stats.prob_mark++;
 			}
 		} else
-			p->qR = red_random(p);
+			q->vars.qR = red_random(p);
 	}
 
 	/* Admit new packet */
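
The enqueue hunks above are a mechanical rename (p->qavg becomes q->vars.qavg, and so on); the RED decision structure itself is untouched. As a condensed sketch of that logic, with CHOKe's random-peer comparison elided:

	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);

	if (q->vars.qavg <= p->qth_min) {
		/* small queue: always admit, disarm the drop counter */
	} else if (q->vars.qavg > p->qth_max) {
		/* large queue: ECN-mark if permitted, otherwise drop */
	} else {
		/* between the thresholds: mark/drop with a probability
		 * that grows toward max_P as qavg nears qth_max, via
		 * red_mark_probability() and red_random() */
	}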
@@ -404,8 +354,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 
 	if (q->head == q->tail) {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 		return NULL;
 	}
 
@@ -428,8 +378,8 @@ static unsigned int choke_drop(struct Qdisc *sch)
 	if (len > 0)
 		q->stats.other++;
 	else {
-		if (!red_is_idling(&q->parms))
-			red_start_of_idle_period(&q->parms);
+		if (!red_is_idling(&q->vars))
+			red_start_of_idle_period(&q->vars);
 	}
 
 	return len;
@@ -439,12 +389,13 @@ static void choke_reset(struct Qdisc *sch)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 
-	red_restart(&q->parms);
+	red_restart(&q->vars);
 }
 
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
 	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
 	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
+	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
 };
 
 
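The new TCA_CHOKE_MAX_P attribute carries the upper bound on the marking probability as a 32-bit fixed-point value; by the convention the adaptative-RED series uses for TCA_RED_MAX_P, probability 1.0 corresponds to 2^32. A hypothetical userspace encoder, shown for illustration only:

	#include <stdint.h>

	/* hypothetical helper: encode a drop probability in [0, 1)
	 * the way tc encodes max_P, i.e. scaled by 2^32 */
	static uint32_t prob_to_max_P(double prob)
	{
		return (uint32_t)(prob * 4294967296.0);	/* prob * 2^32 */
	}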
@@ -466,6 +417,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 	int err;
 	struct sk_buff **old = NULL;
 	unsigned int mask;
+	u32 max_P;
 
 	if (opt == NULL)
 		return -EINVAL;
@@ -478,6 +430,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 	    tb[TCA_CHOKE_STAB] == NULL)
 		return -EINVAL;
 
+	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
 	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
 	if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -527,10 +481,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 		      ctl->Plog, ctl->Scell_log,
-		      nla_data(tb[TCA_CHOKE_STAB]));
+		      nla_data(tb[TCA_CHOKE_STAB]),
+		      max_P);
+	red_set_vars(&q->vars);
 
 	if (q->head == q->tail)
-		red_end_of_idle_period(&q->parms);
+		red_end_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
 	choke_free(old);
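
The two extra arguments follow the updated red_set_parms() prototype, which now takes max_P after the stab table, alongside the new red_set_vars() initializer for the split-out state. Reconstructed from the call sites in this hunk (the exact declarations in include/net/red.h may differ in qualifiers):

	void red_set_parms(struct red_parms *p,
			   u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
			   u8 Scell_log, u8 *stab, u32 max_P);

	void red_set_vars(struct red_vars *v);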
@@ -561,6 +517,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_put_failure;
 
 	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+	NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
 	return nla_nest_end(skb, opts);
 
 nla_put_failure: