Diffstat (limited to 'net/sched/sch_prio.c')
-rw-r--r--	net/sched/sch_prio.c	143
1 file changed, 21 insertions(+), 122 deletions(-)
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5532f1031ab5..f849243eb095 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,11 +24,9 @@
 struct prio_sched_data
 {
 	int	bands;
-	int	curband; /* for round-robin */
 	struct tcf_proto *filter_list;
 	u8	prio2band[TC_PRIO_MAX+1];
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
-	int	mq;
 };
 
 
@@ -55,17 +53,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (!q->filter_list || err < 0) {
 			if (TC_H_MAJ(band))
 				band = 0;
-			band = q->prio2band[band&TC_PRIO_MAX];
-			goto out;
+			return q->queues[q->prio2band[band&TC_PRIO_MAX]];
 		}
 		band = res.classid;
 	}
 	band = TC_H_MIN(band) - 1;
 	if (band >= q->bands)
-		band = q->prio2band[0];
-out:
-	if (q->mq)
-		skb_set_queue_mapping(skb, band);
+		return q->queues[q->prio2band[0]];
+
 	return q->queues[band];
 }
 
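The rewritten prio_classify() above returns a child qdisc directly on both early-exit paths instead of funneling through the removed out: label, which also carried the multiqueue queue mapping. A minimal userspace sketch of the surviving prio2band lookup, with hypothetical stand-in names and the familiar pfifo_fast-style default priomap rather than kernel types:

#include <stdio.h>

#define TC_PRIO_MAX	15

/* Stand-in for the prio2band lookup on the unclassified path:
 * an skb priority is masked to 0..TC_PRIO_MAX and mapped to a band. */
static int classify_by_priority(const unsigned char prio2band[TC_PRIO_MAX + 1],
				unsigned int priority)
{
	return prio2band[priority & TC_PRIO_MAX];
}

int main(void)
{
	/* The well-known default priomap used by tc(8) for prio/pfifo_fast. */
	const unsigned char prio2band[TC_PRIO_MAX + 1] = {
		1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
	};
	unsigned int prio;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++)
		printf("priority %2u -> band %d\n", prio,
		       classify_by_priority(prio2band, prio));
	return 0;
}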
@@ -86,8 +81,9 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #endif
 
-	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += skb->len;
+	ret = qdisc_enqueue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
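prio_enqueue() now goes through the qdisc_enqueue() helper and, on success, charges sch->bstats.bytes with qdisc_pkt_len(skb), the packet length stamped into the qdisc control block when the skb entered the qdisc tree, rather than the current skb->len. A toy model of that accounting, using simplified stand-in structs (not the kernel's):

#include <stdio.h>

/* Toy stand-ins for struct sk_buff and the qdisc byte/packet counters. */
struct toy_skb {
	unsigned int len;	/* current linear length, as skb->len */
	unsigned int pkt_len;	/* as qdisc_skb_cb(skb)->pkt_len */
};

struct toy_bstats {
	unsigned long long bytes;
	unsigned long packets;
};

/* Mirrors the new accounting: on a successful enqueue, charge the
 * stamped packet length, not skb->len. */
static void account_enqueue_success(struct toy_bstats *b,
				    const struct toy_skb *skb)
{
	b->bytes += skb->pkt_len;
	b->packets++;
}

int main(void)
{
	/* A packet whose skb->len was changed inside the tree still
	 * accounts the length stamped at the root. */
	struct toy_skb skb = { .len = 1400, .pkt_len = 1500 };
	struct toy_bstats bstats = { 0, 0 };

	account_enqueue_success(&bstats, &skb);
	printf("bytes=%llu packets=%lu\n", bstats.bytes, bstats.packets);
	return 0;
}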
@@ -123,67 +119,23 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 }
 
 
-static struct sk_buff *
-prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc* sch)
 {
-	struct sk_buff *skb;
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
-	struct Qdisc *qdisc;
 
 	for (prio = 0; prio < q->bands; prio++) {
-		/* Check if the target subqueue is available before
-		 * pulling an skb.  This way we avoid excessive requeues
-		 * for slower queues.
-		 */
-		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
-			qdisc = q->queues[prio];
-			skb = qdisc->dequeue(qdisc);
-			if (skb) {
-				sch->q.qlen--;
-				return skb;
-			}
+		struct Qdisc *qdisc = q->queues[prio];
+		struct sk_buff *skb = qdisc->dequeue(qdisc);
+		if (skb) {
+			sch->q.qlen--;
+			return skb;
 		}
 	}
 	return NULL;
 
 }
 
-static struct sk_buff *rr_dequeue(struct Qdisc* sch)
-{
-	struct sk_buff *skb;
-	struct prio_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *qdisc;
-	int bandcount;
-
-	/* Only take one pass through the queues.  If nothing is available,
-	 * return nothing.
-	 */
-	for (bandcount = 0; bandcount < q->bands; bandcount++) {
-		/* Check if the target subqueue is available before
-		 * pulling an skb.  This way we avoid excessive requeues
-		 * for slower queues.  If the queue is stopped, try the
-		 * next queue.
-		 */
-		if (!__netif_subqueue_stopped(sch->dev,
-		    (q->mq ? q->curband : 0))) {
-			qdisc = q->queues[q->curband];
-			skb = qdisc->dequeue(qdisc);
-			if (skb) {
-				sch->q.qlen--;
-				q->curband++;
-				if (q->curband >= q->bands)
-					q->curband = 0;
-				return skb;
-			}
-		}
-		q->curband++;
-		if (q->curband >= q->bands)
-			q->curband = 0;
-	}
-	return NULL;
-}
-
 static unsigned int prio_drop(struct Qdisc* sch)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
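With the __netif_subqueue_stopped() checks and the round-robin rr_dequeue() removed, dequeue is a plain strict-priority scan: band 0 drains completely before band 1 is touched, and so on. A self-contained sketch of that loop, with queues reduced to simple counters and all names hypothetical:

#include <stdio.h>

#define BANDS 3

/* Pending packet counts per band; band 0 is the highest priority. */
static int backlog[BANDS] = { 1, 2, 3 };

/* Return the first non-empty band, scanning from 0 upward, or -1 when
 * every band is empty -- the shape of the simplified prio_dequeue(). */
static int dequeue_band(void)
{
	int band;

	for (band = 0; band < BANDS; band++) {
		if (backlog[band] > 0) {
			backlog[band]--;
			return band;
		}
	}
	return -1;
}

int main(void)
{
	int band;

	while ((band = dequeue_band()) >= 0)
		printf("dequeued from band %d\n", band);
	return 0;
}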
@@ -228,45 +180,22 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
-	struct nlattr *tb[TCA_PRIO_MAX + 1];
-	int err;
 	int i;
 
-	err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt,
-				      sizeof(*qopt));
-	if (err < 0)
-		return err;
-
-	q->bands = qopt->bands;
-	/* If we're multiqueue, make sure the number of incoming bands
-	 * matches the number of queues on the device we're associating with.
-	 * If the number of bands requested is zero, then set q->bands to
-	 * dev->egress_subqueue_count.  Also, the root qdisc must be the
-	 * only one that is enabled for multiqueue, since it's the only one
-	 * that interacts with the underlying device.
-	 */
-	q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
-	if (q->mq) {
-		if (sch->parent != TC_H_ROOT)
-			return -EINVAL;
-		if (netif_is_multiqueue(sch->dev)) {
-			if (q->bands == 0)
-				q->bands = sch->dev->egress_subqueue_count;
-			else if (q->bands != sch->dev->egress_subqueue_count)
-				return -EINVAL;
-		} else
-			return -EOPNOTSUPP;
-	}
+	if (nla_len(opt) < sizeof(*qopt))
+		return -EINVAL;
+	qopt = nla_data(opt);
 
-	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
+	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
 		return -EINVAL;
 
 	for (i=0; i<=TC_PRIO_MAX; i++) {
-		if (qopt->priomap[i] >= q->bands)
+		if (qopt->priomap[i] >= qopt->bands)
 			return -EINVAL;
 	}
 
 	sch_tree_lock(sch);
+	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
 	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
@@ -281,7 +210,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	for (i=0; i<q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
-			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+			child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle, i + 1));
 			if (child) {
 				sch_tree_lock(sch);
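prio_tune() now validates the flat tc_prio_qopt payload itself: the attribute must be at least sizeof(*qopt) long, bands must lie in [2, TCQ_PRIO_BANDS], and every priomap entry must name an existing band; only then does it update q->bands under the tree lock and create default pfifo children. The same checks in a standalone sketch (toy struct, not the kernel's netlink types):

#include <stdio.h>

#define TC_PRIO_MAX	15
#define TCQ_PRIO_BANDS	16
#define EINVAL		22	/* stand-in; the kernel uses <linux/errno.h> */

/* Toy copy of the tc_prio_qopt fields that prio_tune() inspects. */
struct toy_prio_qopt {
	int bands;
	unsigned char priomap[TC_PRIO_MAX + 1];
};

/* Same validation order as the rewritten prio_tune(): reject a bad band
 * count first, then any priomap entry pointing past the last band. */
static int validate(const struct toy_prio_qopt *qopt)
{
	int i;

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct toy_prio_qopt good = { 3, { 1, 2, 2, 2, 1, 2, 0, 0,
					   1, 1, 1, 1, 1, 1, 1, 1 } };
	struct toy_prio_qopt bad = good;

	bad.priomap[0] = 5;	/* band 5 does not exist when bands == 3 */
	printf("good: %d, bad: %d\n", validate(&good), validate(&bad));
	return 0;
}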
@@ -331,10 +261,6 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (q->mq) {
-		if (nla_put_flag(skb, TCA_PRIO_MQ) < 0)
-			goto nla_put_failure;
-	}
 	nla_nest_compat_end(skb, nest);
 
 	return skb->len;
@@ -507,44 +433,17 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };
 
-static struct Qdisc_ops rr_qdisc_ops __read_mostly = {
-	.next		=	NULL,
-	.cl_ops		=	&prio_class_ops,
-	.id		=	"rr",
-	.priv_size	=	sizeof(struct prio_sched_data),
-	.enqueue	=	prio_enqueue,
-	.dequeue	=	rr_dequeue,
-	.requeue	=	prio_requeue,
-	.drop		=	prio_drop,
-	.init		=	prio_init,
-	.reset		=	prio_reset,
-	.destroy	=	prio_destroy,
-	.change		=	prio_tune,
-	.dump		=	prio_dump,
-	.owner		=	THIS_MODULE,
-};
-
 static int __init prio_module_init(void)
 {
-	int err;
-
-	err = register_qdisc(&prio_qdisc_ops);
-	if (err < 0)
-		return err;
-	err = register_qdisc(&rr_qdisc_ops);
-	if (err < 0)
-		unregister_qdisc(&prio_qdisc_ops);
-	return err;
+	return register_qdisc(&prio_qdisc_ops);
 }
 
 static void __exit prio_module_exit(void)
 {
 	unregister_qdisc(&prio_qdisc_ops);
-	unregister_qdisc(&rr_qdisc_ops);
 }
 
 module_init(prio_module_init)
 module_exit(prio_module_exit)
 
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("sch_rr");