aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2008-01-31 21:36:52 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-31 22:28:34 -0500
commit7d2681a6ff4f9ab5e48d02550b4c6338f1638998 (patch)
tree3db57231f8153682da5136081d091bdec3cf3985 /net/sched
parent5239008b0de2507a531440b8c3019fb9c116fb1a (diff)
[NET_SCHED]: sch_sfq: add support for external classifiers
Add support for external classifiers to allow using different flow hash functions, similar to ESFQ. When no classifier is attached, the built-in hash is used as before.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_sfq.c95
1 file changed, 91 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 91af539ab6e6..d818d1985cca 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -95,6 +95,7 @@ struct sfq_sched_data
95 int limit; 95 int limit;
96 96
97/* Variables */ 97/* Variables */
98 struct tcf_proto *filter_list;
98 struct timer_list perturb_timer; 99 struct timer_list perturb_timer;
99 u32 perturbation; 100 u32 perturbation;
100 sfq_index tail; /* Index of current slot in round */ 101 sfq_index tail; /* Index of current slot in round */
@@ -155,6 +156,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
155 return sfq_fold_hash(q, h, h2); 156 return sfq_fold_hash(q, h, h2);
156} 157}
157 158
/*
 * sfq_classify - choose the SFQ hash bucket for a packet.
 *
 * Resolution order:
 *   1. If skb->priority directly addresses this qdisc (major handle
 *      matches) and its minor number is a valid bucket index, use it.
 *   2. If no external classifier is attached, fall back to the
 *      built-in sfq_hash() (+1 so that 0 stays free as a sentinel).
 *   3. Otherwise run the attached classifier chain via tc_classify().
 *
 * Returns a 1-based bucket number (1..SFQ_HASH_DIVISOR), or 0 meaning
 * "do not enqueue".  Every path that returns 0 first sets *qerr:
 * NET_XMIT_BYPASS (count the packet as a drop) or NET_XMIT_SUCCESS
 * (an action stole/queued the packet; do not count it as a drop).
 */
159static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
160 int *qerr)
161{
162 struct sfq_sched_data *q = qdisc_priv(sch);
163 struct tcf_result res;
164 int result;
165
/* Direct bucket selection via the tc classid stored in skb->priority. */
166 if (TC_H_MAJ(skb->priority) == sch->handle &&
167 TC_H_MIN(skb->priority) > 0 &&
168 TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
169 return TC_H_MIN(skb->priority);
170
/* No classifier attached: use the built-in flow hash as before. */
171 if (!q->filter_list)
172 return sfq_hash(q, skb) + 1;
173
/* Classifier path: assume "drop and count" unless an action overrides it. */
174 *qerr = NET_XMIT_BYPASS;
175 result = tc_classify(skb, q->filter_list, &res);
176 if (result >= 0) {
177#ifdef CONFIG_NET_CLS_ACT
178 switch (result) {
179 case TC_ACT_STOLEN:
180 case TC_ACT_QUEUED:
181 *qerr = NET_XMIT_SUCCESS;
/* fall through: stolen/queued packets are not enqueued here either */
182 case TC_ACT_SHOT:
183 return 0;
184 }
185#endif
/* Classifier verdict: accept the classid's minor number if it is a
   valid bucket; note 0 (no match) falls through to the drop return. */
186 if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
187 return TC_H_MIN(res.classid);
188 }
189 return 0;
190}
191
158static inline void sfq_link(struct sfq_sched_data *q, sfq_index x) 192static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
159{ 193{
160 sfq_index p, n; 194 sfq_index p, n;
@@ -245,8 +279,18 @@ static int
245sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 279sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
246{ 280{
247 struct sfq_sched_data *q = qdisc_priv(sch); 281 struct sfq_sched_data *q = qdisc_priv(sch);
248 unsigned hash = sfq_hash(q, skb); 282 unsigned int hash;
249 sfq_index x; 283 sfq_index x;
284 int ret;
285
286 hash = sfq_classify(skb, sch, &ret);
287 if (hash == 0) {
288 if (ret == NET_XMIT_BYPASS)
289 sch->qstats.drops++;
290 kfree_skb(skb);
291 return ret;
292 }
293 hash--;
250 294
251 x = q->ht[hash]; 295 x = q->ht[hash];
252 if (x == SFQ_DEPTH) { 296 if (x == SFQ_DEPTH) {
@@ -289,8 +333,18 @@ static int
289sfq_requeue(struct sk_buff *skb, struct Qdisc *sch) 333sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
290{ 334{
291 struct sfq_sched_data *q = qdisc_priv(sch); 335 struct sfq_sched_data *q = qdisc_priv(sch);
292 unsigned hash = sfq_hash(q, skb); 336 unsigned int hash;
293 sfq_index x; 337 sfq_index x;
338 int ret;
339
340 hash = sfq_classify(skb, sch, &ret);
341 if (hash == 0) {
342 if (ret == NET_XMIT_BYPASS)
343 sch->qstats.drops++;
344 kfree_skb(skb);
345 return ret;
346 }
347 hash--;
294 348
295 x = q->ht[hash]; 349 x = q->ht[hash];
296 if (x == SFQ_DEPTH) { 350 if (x == SFQ_DEPTH) {
@@ -465,6 +519,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
/*
 * sfq_destroy - qdisc teardown: release the attached classifier chain
 * before cancelling the hash perturbation timer.
 */
465static void sfq_destroy(struct Qdisc *sch) 519static void sfq_destroy(struct Qdisc *sch)
466{ 520{
467 struct sfq_sched_data *q = qdisc_priv(sch); 521 struct sfq_sched_data *q = qdisc_priv(sch);
 522
/* New with external-classifier support: free the tcf_proto chain. */
 523 tcf_destroy_chain(q->filter_list);
468 del_timer(&q->perturb_timer); 524 del_timer(&q->perturb_timer);
469} 525}
470 526
@@ -490,9 +546,40 @@ nla_put_failure:
490 return -1; 546 return -1;
491} 547}
492 548
/* Creating/changing classes is not supported: SFQ "classes" are just the
   implicit hash buckets, not user-configurable objects. */
549static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
550 struct nlattr **tca, unsigned long *arg)
551{
552 return -EOPNOTSUPP;
553}
554
/* No class references to hand out; returning 0 signals "class not found". */
555static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
556{
557 return 0;
558}
559
/*
 * Expose the qdisc-level filter list so tc can attach/detach classifiers.
 * Only cl == 0 (the qdisc itself) owns a chain; there are no per-class
 * chains, so any non-zero class handle yields NULL.
 */
560static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
561{
562 struct sfq_sched_data *q = qdisc_priv(sch);
563
564 if (cl)
565 return NULL;
566 return &q->filter_list;
567}
568
/* Nothing to enumerate: SFQ exposes no walkable class objects. */
569static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
570{
571 return;
572}
573
/* Minimal class-ops table: just enough (get/change/tcf_chain/walk) for the
   tc filter machinery to attach classifiers to this qdisc. */
574static const struct Qdisc_class_ops sfq_class_ops = {
575 .get = sfq_get,
576 .change = sfq_change_class,
577 .tcf_chain = sfq_find_tcf,
578 .walk = sfq_walk,
579};
580
493static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { 581static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
494 .next = NULL, 582 .cl_ops = &sfq_class_ops,
495 .cl_ops = NULL,
496 .id = "sfq", 583 .id = "sfq",
497 .priv_size = sizeof(struct sfq_sched_data), 584 .priv_size = sizeof(struct sfq_sched_data),
498 .enqueue = sfq_enqueue, 585 .enqueue = sfq_enqueue,