author      Patrick McHardy <kaber@trash.net>        2008-01-23 01:11:17 -0500
committer   David S. Miller <davem@davemloft.net>    2008-01-28 18:11:10 -0500
commit      1e90474c377e92db7262a8968a45c1dd980ca9e5 (patch)
tree        645af56dcb17cf1a76fd3b7f1a8b833a3fffc3d7 /net/sched/sch_hfsc.c
parent      01480e1cf5e2118eba8a8968239f3242072f9563 (diff)
[NET_SCHED]: Convert packet schedulers from rtnetlink to new netlink API
Convert packet schedulers to use the netlink API. Unfortunately a gradual conversion is not possible without breaking compilation in the middle or adding lots of casts, so this patch converts them all in one step. The patch has been mostly generated automatically with some minor edits to at least allow separate conversion of classifiers and actions.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_hfsc.c')
-rw-r--r--   net/sched/sch_hfsc.c   72
1 file changed, 36 insertions, 36 deletions
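
All of the hunks below apply the same mechanical substitution on the parse side: attribute tables grow from TCA_*_MAX to TCA_*_MAX + 1 entries, the old "-1" index offset disappears, rtattr_parse_nested() becomes nla_parse_nested() (which takes an extra policy argument, passed as NULL here), and RTA_PAYLOAD()/RTA_DATA() become nla_len()/nla_data(). For orientation, a minimal sketch of that pattern, mirroring the converted hfsc_change_class() code; the helper name hfsc_parse_rsc_example is invented for illustration and is not part of the patch:

#include <net/netlink.h>        /* nla_parse_nested(), nla_len(), nla_data() */
#include <linux/pkt_sched.h>    /* TCA_HFSC_*, struct tc_service_curve */

/*
 * Illustrative sketch only -- not part of this patch.  Extracts the
 * real-time service curve attribute from a nested TCA_OPTIONS blob
 * using the nla_* helpers that replace the rtattr calls below.
 */
static int hfsc_parse_rsc_example(struct nlattr *opt,
                                  struct tc_service_curve **rscp)
{
        struct nlattr *tb[TCA_HFSC_MAX + 1];      /* indexed 0..TCA_HFSC_MAX */
        struct tc_service_curve *rsc = NULL;

        /* nla_parse_nested() validates against a policy; NULL skips that */
        if (opt == NULL || nla_parse_nested(tb, TCA_HFSC_MAX, opt, NULL))
                return -EINVAL;

        if (tb[TCA_HFSC_RSC]) {
                if (nla_len(tb[TCA_HFSC_RSC]) < sizeof(*rsc))
                        return -EINVAL;           /* truncated payload */
                rsc = nla_data(tb[TCA_HFSC_RSC]); /* points into the skb */
                if (rsc->m1 == 0 && rsc->m2 == 0) /* all-zero curve == unset */
                        rsc = NULL;
        }

        *rscp = rsc;
        return 0;
}
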
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 69dc3bccf024..4e6a164d3058 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -988,39 +988,39 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                  struct rtattr **tca, unsigned long *arg)
+                  struct nlattr **tca, unsigned long *arg)
 {
         struct hfsc_sched *q = qdisc_priv(sch);
         struct hfsc_class *cl = (struct hfsc_class *)*arg;
         struct hfsc_class *parent = NULL;
-        struct rtattr *opt = tca[TCA_OPTIONS-1];
-        struct rtattr *tb[TCA_HFSC_MAX];
+        struct nlattr *opt = tca[TCA_OPTIONS];
+        struct nlattr *tb[TCA_HFSC_MAX + 1];
         struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
         u64 cur_time;
 
-        if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
+        if (opt == NULL || nla_parse_nested(tb, TCA_HFSC_MAX, opt, NULL))
                 return -EINVAL;
 
-        if (tb[TCA_HFSC_RSC-1]) {
-                if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
+        if (tb[TCA_HFSC_RSC]) {
+                if (nla_len(tb[TCA_HFSC_RSC]) < sizeof(*rsc))
                         return -EINVAL;
-                rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
+                rsc = nla_data(tb[TCA_HFSC_RSC]);
                 if (rsc->m1 == 0 && rsc->m2 == 0)
                         rsc = NULL;
         }
 
-        if (tb[TCA_HFSC_FSC-1]) {
-                if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
+        if (tb[TCA_HFSC_FSC]) {
+                if (nla_len(tb[TCA_HFSC_FSC]) < sizeof(*fsc))
                         return -EINVAL;
-                fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
+                fsc = nla_data(tb[TCA_HFSC_FSC]);
                 if (fsc->m1 == 0 && fsc->m2 == 0)
                         fsc = NULL;
         }
 
-        if (tb[TCA_HFSC_USC-1]) {
-                if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
+        if (tb[TCA_HFSC_USC]) {
+                if (nla_len(tb[TCA_HFSC_USC]) < sizeof(*usc))
                         return -EINVAL;
-                usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
+                usc = nla_data(tb[TCA_HFSC_USC]);
                 if (usc->m1 == 0 && usc->m2 == 0)
                         usc = NULL;
         }
@@ -1050,10 +1050,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                 }
                 sch_tree_unlock(sch);
 
-                if (tca[TCA_RATE-1])
+                if (tca[TCA_RATE])
                         gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                               &sch->dev->queue_lock,
-                                              tca[TCA_RATE-1]);
+                                              tca[TCA_RATE]);
                 return 0;
         }
 
@@ -1106,9 +1106,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
         cl->cl_pcvtoff = parent->cl_cvtoff;
         sch_tree_unlock(sch);
 
-        if (tca[TCA_RATE-1])
+        if (tca[TCA_RATE])
                 gen_new_estimator(&cl->bstats, &cl->rate_est,
-                        &sch->dev->queue_lock, tca[TCA_RATE-1]);
+                        &sch->dev->queue_lock, tca[TCA_RATE]);
         *arg = (unsigned long)cl;
         return 0;
 }
@@ -1304,11 +1304,11 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
         tsc.m1 = sm2m(sc->sm1);
         tsc.d = dx2d(sc->dx);
         tsc.m2 = sm2m(sc->sm2);
-        RTA_PUT(skb, attr, sizeof(tsc), &tsc);
+        NLA_PUT(skb, attr, sizeof(tsc), &tsc);
 
         return skb->len;
 
- rtattr_failure:
+ nla_put_failure:
         return -1;
 }
 
@@ -1317,19 +1317,19 @@ hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
         if ((cl->cl_flags & HFSC_RSC) &&
             (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
-                goto rtattr_failure;
+                goto nla_put_failure;
 
         if ((cl->cl_flags & HFSC_FSC) &&
             (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
-                goto rtattr_failure;
+                goto nla_put_failure;
 
         if ((cl->cl_flags & HFSC_USC) &&
             (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
-                goto rtattr_failure;
+                goto nla_put_failure;
 
         return skb->len;
 
- rtattr_failure:
+ nla_put_failure:
         return -1;
 }
 
@@ -1339,20 +1339,20 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
 {
         struct hfsc_class *cl = (struct hfsc_class *)arg;
         unsigned char *b = skb_tail_pointer(skb);
-        struct rtattr *rta = (struct rtattr *)b;
+        struct nlattr *nla = (struct nlattr *)b;
 
         tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
         tcm->tcm_handle = cl->classid;
         if (cl->level == 0)
                 tcm->tcm_info = cl->qdisc->handle;
 
-        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+        NLA_PUT(skb, TCA_OPTIONS, 0, NULL);
         if (hfsc_dump_curves(skb, cl) < 0)
-                goto rtattr_failure;
-        rta->rta_len = skb_tail_pointer(skb) - b;
+                goto nla_put_failure;
+        nla->nla_len = skb_tail_pointer(skb) - b;
         return skb->len;
 
- rtattr_failure:
+ nla_put_failure:
         nlmsg_trim(skb, b);
         return -1;
 }
@@ -1423,15 +1423,15 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 {
         struct hfsc_sched *q = qdisc_priv(sch);
         struct tc_hfsc_qopt *qopt;
         unsigned int i;
 
-        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
+        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                 return -EINVAL;
-        qopt = RTA_DATA(opt);
+        qopt = nla_data(opt);
 
         q->defcls = qopt->defcls;
         for (i = 0; i < HFSC_HSIZE; i++)
@@ -1459,14 +1459,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
 {
         struct hfsc_sched *q = qdisc_priv(sch);
         struct tc_hfsc_qopt *qopt;
 
-        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
+        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                 return -EINVAL;
-        qopt = RTA_DATA(opt);
+        qopt = nla_data(opt);
 
         sch_tree_lock(sch);
         q->defcls = qopt->defcls;
@@ -1550,10 +1550,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
         struct tc_hfsc_qopt qopt;
 
         qopt.defcls = q->defcls;
-        RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
         return skb->len;
 
- rtattr_failure:
+ nla_put_failure:
         nlmsg_trim(skb, b);
         return -1;
 }
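
The dump side follows the mirrored pattern: RTA_PUT() becomes NLA_PUT(), and the rtattr_failure labels become nla_put_failure, the label that the 2.6.24-era NLA_PUT() macro jumps to when the skb runs out of tailroom. A minimal sketch of that shape (dump_sc_example is an invented name, not part of the patch):

#include <net/netlink.h>        /* NLA_PUT() and the nla_put_failure convention */
#include <linux/pkt_sched.h>    /* struct tc_service_curve */

/* Illustrative sketch only: the dump pattern used by hfsc_dump_sc() above. */
static int dump_sc_example(struct sk_buff *skb, int attr,
                           const struct tc_service_curve *tsc)
{
        /* NLA_PUT() wraps nla_put() and jumps to nla_put_failure on error */
        NLA_PUT(skb, attr, sizeof(*tsc), tsc);
        return skb->len;

 nla_put_failure:
        return -1;
}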