author		Patrick McHardy <kaber@trash.net>	2008-01-23 01:11:17 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:11:10 -0500
commit		1e90474c377e92db7262a8968a45c1dd980ca9e5
tree		645af56dcb17cf1a76fd3b7f1a8b833a3fffc3d7	/net/sched/sch_gred.c
parent		01480e1cf5e2118eba8a8968239f3242072f9563
[NET_SCHED]: Convert packet schedulers from rtnetlink to new netlink API
Convert packet schedulers to use the netlink API. Unfortunately a gradual
conversion is not possible without breaking compilation in the middle or
adding lots of casts, so this patch converts them all in one step. The
patch has been mostly generated automatically with some minor edits to
at least allow separate conversion of classifiers and actions.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
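
[Editor's note] The core of the conversion is mechanical: the legacy rtnetlink helpers fill a table of TCA_*_MAX slots indexed by attribute type minus one (tb[ATTR - 1]), while the new netlink API uses TCA_*_MAX + 1 slots indexed by type directly (tb[ATTR]), and RTA_PAYLOAD()/RTA_DATA() become nla_len()/nla_data(). A minimal before/after sketch of a qdisc change() callback, using only helpers that appear in this patch; TCA_FOO_*, struct tc_foo_qopt and foo_apply() are hypothetical names for illustration, not from the patch:

/* Old rtnetlink idiom: TCA_FOO_MAX entries, indexed by type - 1. */
static int foo_change_old(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_FOO_MAX];
	struct tc_foo_qopt *ctl;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_FOO_MAX, opt))
		return -EINVAL;
	if (tb[TCA_FOO_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_FOO_PARMS - 1]) < sizeof(*ctl))
		return -EINVAL;
	ctl = RTA_DATA(tb[TCA_FOO_PARMS - 1]);
	return foo_apply(sch, ctl);	/* hypothetical helper */
}

/* New netlink idiom: TCA_FOO_MAX + 1 entries, indexed by type. */
static int foo_change_new(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_FOO_MAX + 1];
	struct tc_foo_qopt *ctl;

	if (opt == NULL || nla_parse_nested(tb, TCA_FOO_MAX, opt, NULL))
		return -EINVAL;
	if (tb[TCA_FOO_PARMS] == NULL ||
	    nla_len(tb[TCA_FOO_PARMS]) < sizeof(*ctl))
		return -EINVAL;
	ctl = nla_data(tb[TCA_FOO_PARMS]);
	return foo_apply(sch, ctl);	/* hypothetical helper */
}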
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--	net/sched/sch_gred.c	59
1 file changed, 32 insertions(+), 27 deletions(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index e2bcd6682c70..6b784838a534 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -350,16 +350,16 @@ static inline void gred_destroy_vq(struct gred_sched_data *q)
 	kfree(q);
 }
 
-static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
+static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_sopt *sopt;
 	int i;
 
-	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
+	if (dps == NULL || nla_len(dps) < sizeof(*sopt))
 		return -EINVAL;
 
-	sopt = RTA_DATA(dps);
+	sopt = nla_data(dps);
 
 	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
 		return -EINVAL;
@@ -425,28 +425,28 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 	return 0;
 }
 
-static int gred_change(struct Qdisc *sch, struct rtattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_qopt *ctl;
-	struct rtattr *tb[TCA_GRED_MAX];
+	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err = -EINVAL, prio = GRED_DEF_PRIO;
 	u8 *stab;
 
-	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
+	if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL))
 		return -EINVAL;
 
-	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
+	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
 		return gred_change_table_def(sch, opt);
 
-	if (tb[TCA_GRED_PARMS-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
-	    tb[TCA_GRED_STAB-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
+	if (tb[TCA_GRED_PARMS] == NULL ||
+	    nla_len(tb[TCA_GRED_PARMS]) < sizeof(*ctl) ||
+	    tb[TCA_GRED_STAB] == NULL ||
+	    nla_len(tb[TCA_GRED_STAB]) < 256)
 		return -EINVAL;
 
-	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
-	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);
+	ctl = nla_data(tb[TCA_GRED_PARMS]);
+	stab = nla_data(tb[TCA_GRED_STAB]);
 
 	if (ctl->DP >= table->DPs)
 		goto errout;
@@ -486,23 +486,23 @@ errout:
 	return err;
 }
 
-static int gred_init(struct Qdisc *sch, struct rtattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct rtattr *tb[TCA_GRED_MAX];
+	struct nlattr *tb[TCA_GRED_MAX + 1];
 
-	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
+	if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL))
 		return -EINVAL;
 
-	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
+	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
 		return -EINVAL;
 
-	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
+	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
 
 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct rtattr *parms, *opts = NULL;
+	struct nlattr *parms, *opts = NULL;
 	int i;
 	struct tc_gred_sopt sopt = {
 		.DPs	= table->DPs,
@@ -511,9 +511,13 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		.flags	= table->red_flags,
 	};
 
-	opts = RTA_NEST(skb, TCA_OPTIONS);
-	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
-	parms = RTA_NEST(skb, TCA_GRED_PARMS);
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+	parms = nla_nest_start(skb, TCA_GRED_PARMS);
+	if (parms == NULL)
+		goto nla_put_failure;
 
 	for (i = 0; i < MAX_DPs; i++) {
 		struct gred_sched_data *q = table->tab[i];
@@ -555,15 +559,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
 
 append_opt:
-		RTA_APPEND(skb, sizeof(opt), &opt);
+		if (nla_append(skb, sizeof(opt), &opt) < 0)
+			goto nla_put_failure;
 	}
 
-	RTA_NEST_END(skb, parms);
+	nla_nest_end(skb, parms);
 
-	return RTA_NEST_END(skb, opts);
+	return nla_nest_end(skb, opts);
 
-rtattr_failure:
-	return RTA_NEST_CANCEL(skb, opts);
+nla_put_failure:
+	return nla_nest_cancel(skb, opts);
 }
 
 static void gred_destroy(struct Qdisc *sch)
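
[Editor's note] The dump-side hunks also add error handling that the old RTA_* macros hid: nla_nest_start() returns NULL when the skb is full, and unwinding goes through nla_nest_cancel(), which trims everything written since the nest was opened. A condensed sketch of the pattern this patch adopts, assuming the NLA_PUT() macro of this kernel era (it jumps to the nla_put_failure label on overflow); TCA_FOO_SOPT and struct tc_foo_sopt are hypothetical names:

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *opts;
	struct tc_foo_sopt sopt = { 0 };	/* filled from qdisc state in real code */

	/* Open the nested TCA_OPTIONS attribute; NULL means no room left. */
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	/* NLA_PUT() jumps to nla_put_failure if the attribute does not fit. */
	NLA_PUT(skb, TCA_FOO_SOPT, sizeof(sopt), &sopt);
	/* Close the nest, patching its total length into the nest header. */
	return nla_nest_end(skb, opts);

nla_put_failure:
	/* Trim the partial nest from the skb and report the failure. */
	return nla_nest_cancel(skb, opts);
}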