author | Patrick McHardy <kaber@trash.net> | 2008-01-23 01:11:17 -0500
committer | David S. Miller <davem@davemloft.net> | 2008-01-28 18:11:10 -0500
commit | 1e90474c377e92db7262a8968a45c1dd980ca9e5 (patch)
tree | 645af56dcb17cf1a76fd3b7f1a8b833a3fffc3d7 /net
parent | 01480e1cf5e2118eba8a8968239f3242072f9563 (diff)
[NET_SCHED]: Convert packet schedulers from rtnetlink to new netlink API
Convert packet schedulers to use the netlink API. Unfortunately a gradual
conversion is not possible without breaking compilation in the middle or
adding lots of casts, so this patch converts them all in one step. The
patch has been mostly generated automatically with some minor edits to
at least allow separate conversion of classifiers and actions.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
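
The conversion applied throughout the patch is mechanical, and can be summarized with a small before/after sketch (illustrative only: `TCA_FOO_*`, `struct tc_foo_qopt` and the `foo_*` handlers below are made-up stand-ins, not code taken from any one scheduler). Attribute tables move from `struct rtattr *tb[TCA_X_MAX]` with `TCA_X_FOO - 1` indexing to `struct nlattr *tb[TCA_X_MAX + 1]` indexed directly by attribute type; `RTA_DATA`/`RTA_PAYLOAD`/`rtattr_parse_nested` become `nla_data`/`nla_len`/`nla_parse_nested`; and the dump path swaps `RTA_PUT`/`rtattr_failure` for `NLA_PUT`/`nla_put_failure`, with `nla_nest_start()`/`nla_nest_end()` available for nested options.

```c
/* Illustrative sketch of the conversion, using made-up TCA_FOO_* names. */
#include <net/netlink.h>
#include <net/pkt_sched.h>

enum {					/* hypothetical attribute space */
	TCA_FOO_UNSPEC,
	TCA_FOO_PARMS,
	__TCA_FOO_MAX
};
#define TCA_FOO_MAX (__TCA_FOO_MAX - 1)

struct tc_foo_qopt {			/* hypothetical option struct */
	__u32 limit;
};

/* Before: rtnetlink helpers, zero-based tb[] indexing. */
static int foo_init_old(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_FOO_MAX];
	struct tc_foo_qopt *qopt;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_FOO_MAX, opt))
		return -EINVAL;
	if (tb[TCA_FOO_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_FOO_PARMS - 1]) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(tb[TCA_FOO_PARMS - 1]);
	/* ... apply *qopt to the qdisc ... */
	return 0;
}

/* After: generic netlink helpers, tb[] indexed by attribute type. */
static int foo_init_new(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_FOO_MAX + 1];
	struct tc_foo_qopt *qopt;

	if (opt == NULL || nla_parse_nested(tb, TCA_FOO_MAX, opt, NULL))
		return -EINVAL;
	if (tb[TCA_FOO_PARMS] == NULL ||
	    nla_len(tb[TCA_FOO_PARMS]) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(tb[TCA_FOO_PARMS]);
	/* ... apply *qopt to the qdisc ... */
	return 0;
}

/* Dump side after the conversion: NLA_PUT jumps to nla_put_failure on
 * overflow, and nested options can use nla_nest_start()/nla_nest_end(). */
static int foo_dump_new(struct Qdisc *sch, struct sk_buff *skb)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_foo_qopt qopt = { .limit = 0 };	/* example value */

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_FOO_PARMS, sizeof(qopt), &qopt);
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
```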
Diffstat (limited to 'net')
-rw-r--r-- | net/core/gen_estimator.c   |   8
-rw-r--r-- | net/core/gen_stats.c       |   9
-rw-r--r-- | net/mac80211/wme.c         |  14
-rw-r--r-- | net/netfilter/xt_RATEEST.c |   7
-rw-r--r-- | net/sched/act_api.c        |   2
-rw-r--r-- | net/sched/act_police.c     |   6
-rw-r--r-- | net/sched/sch_api.c        |  87
-rw-r--r-- | net/sched/sch_atm.c        |  48
-rw-r--r-- | net/sched/sch_cbq.c        | 156
-rw-r--r-- | net/sched/sch_dsmark.c     |  85
-rw-r--r-- | net/sched/sch_fifo.c       |  10
-rw-r--r-- | net/sched/sch_generic.c    |   6
-rw-r--r-- | net/sched/sch_gred.c       |  59
-rw-r--r-- | net/sched/sch_hfsc.c       |  72
-rw-r--r-- | net/sched/sch_htb.c        |  68
-rw-r--r-- | net/sched/sch_ingress.c    |  14
-rw-r--r-- | net/sched/sch_netem.c      | 100
-rw-r--r-- | net/sched/sch_prio.c       |  30
-rw-r--r-- | net/sched/sch_red.c        |  52
-rw-r--r-- | net/sched/sch_sfq.c        |  12
-rw-r--r-- | net/sched/sch_tbf.c        |  49
-rw-r--r-- | net/sched/sch_teql.c       |   2
22 files changed, 467 insertions, 429 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 7ab9060bccd0..57abe8266be1 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -159,13 +159,13 @@ skip: | |||
159 | int gen_new_estimator(struct gnet_stats_basic *bstats, | 159 | int gen_new_estimator(struct gnet_stats_basic *bstats, |
160 | struct gnet_stats_rate_est *rate_est, | 160 | struct gnet_stats_rate_est *rate_est, |
161 | spinlock_t *stats_lock, | 161 | spinlock_t *stats_lock, |
162 | struct rtattr *opt) | 162 | struct nlattr *opt) |
163 | { | 163 | { |
164 | struct gen_estimator *est; | 164 | struct gen_estimator *est; |
165 | struct gnet_estimator *parm = RTA_DATA(opt); | 165 | struct gnet_estimator *parm = nla_data(opt); |
166 | int idx; | 166 | int idx; |
167 | 167 | ||
168 | if (RTA_PAYLOAD(opt) < sizeof(*parm)) | 168 | if (nla_len(opt) < sizeof(*parm)) |
169 | return -EINVAL; | 169 | return -EINVAL; |
170 | 170 | ||
171 | if (parm->interval < -2 || parm->interval > 3) | 171 | if (parm->interval < -2 || parm->interval > 3) |
@@ -254,7 +254,7 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats, | |||
254 | */ | 254 | */ |
255 | int gen_replace_estimator(struct gnet_stats_basic *bstats, | 255 | int gen_replace_estimator(struct gnet_stats_basic *bstats, |
256 | struct gnet_stats_rate_est *rate_est, | 256 | struct gnet_stats_rate_est *rate_est, |
257 | spinlock_t *stats_lock, struct rtattr *opt) | 257 | spinlock_t *stats_lock, struct nlattr *opt) |
258 | { | 258 | { |
259 | gen_kill_estimator(bstats, rate_est); | 259 | gen_kill_estimator(bstats, rate_est); |
260 | return gen_new_estimator(bstats, rate_est, stats_lock, opt); | 260 | return gen_new_estimator(bstats, rate_est, stats_lock, opt); |
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 8073561f7c63..c3d0ffeac243 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -20,16 +20,17 @@ | |||
20 | #include <linux/socket.h> | 20 | #include <linux/socket.h> |
21 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
22 | #include <linux/gen_stats.h> | 22 | #include <linux/gen_stats.h> |
23 | #include <net/netlink.h> | ||
23 | #include <net/gen_stats.h> | 24 | #include <net/gen_stats.h> |
24 | 25 | ||
25 | 26 | ||
26 | static inline int | 27 | static inline int |
27 | gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) | 28 | gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) |
28 | { | 29 | { |
29 | RTA_PUT(d->skb, type, size, buf); | 30 | NLA_PUT(d->skb, type, size, buf); |
30 | return 0; | 31 | return 0; |
31 | 32 | ||
32 | rtattr_failure: | 33 | nla_put_failure: |
33 | spin_unlock_bh(d->lock); | 34 | spin_unlock_bh(d->lock); |
34 | return -1; | 35 | return -1; |
35 | } | 36 | } |
@@ -62,7 +63,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, | |||
62 | spin_lock_bh(lock); | 63 | spin_lock_bh(lock); |
63 | d->lock = lock; | 64 | d->lock = lock; |
64 | if (type) | 65 | if (type) |
65 | d->tail = (struct rtattr *)skb_tail_pointer(skb); | 66 | d->tail = (struct nlattr *)skb_tail_pointer(skb); |
66 | d->skb = skb; | 67 | d->skb = skb; |
67 | d->compat_tc_stats = tc_stats_type; | 68 | d->compat_tc_stats = tc_stats_type; |
68 | d->compat_xstats = xstats_type; | 69 | d->compat_xstats = xstats_type; |
@@ -213,7 +214,7 @@ int | |||
213 | gnet_stats_finish_copy(struct gnet_dump *d) | 214 | gnet_stats_finish_copy(struct gnet_dump *d) |
214 | { | 215 | { |
215 | if (d->tail) | 216 | if (d->tail) |
216 | d->tail->rta_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; | 217 | d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; |
217 | 218 | ||
218 | if (d->compat_tc_stats) | 219 | if (d->compat_tc_stats) |
219 | if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats, | 220 | if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats, |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 024519522d37..4e236599dd31 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -297,16 +297,16 @@ static void wme_qdiscop_destroy(struct Qdisc* qd) | |||
297 | 297 | ||
298 | 298 | ||
299 | /* called whenever parameters are updated on existing qdisc */ | 299 | /* called whenever parameters are updated on existing qdisc */ |
300 | static int wme_qdiscop_tune(struct Qdisc *qd, struct rtattr *opt) | 300 | static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt) |
301 | { | 301 | { |
302 | /* struct ieee80211_sched_data *q = qdisc_priv(qd); | 302 | /* struct ieee80211_sched_data *q = qdisc_priv(qd); |
303 | */ | 303 | */ |
304 | /* check our options block is the right size */ | 304 | /* check our options block is the right size */ |
305 | /* copy any options to our local structure */ | 305 | /* copy any options to our local structure */ |
306 | /* Ignore options block for now - always use static mapping | 306 | /* Ignore options block for now - always use static mapping |
307 | struct tc_ieee80211_qopt *qopt = RTA_DATA(opt); | 307 | struct tc_ieee80211_qopt *qopt = nla_data(opt); |
308 | 308 | ||
309 | if (opt->rta_len < RTA_LENGTH(sizeof(*qopt))) | 309 | if (opt->nla_len < nla_attr_size(sizeof(*qopt))) |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue)); | 311 | memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue)); |
312 | */ | 312 | */ |
@@ -315,7 +315,7 @@ static int wme_qdiscop_tune(struct Qdisc *qd, struct rtattr *opt) | |||
315 | 315 | ||
316 | 316 | ||
317 | /* called during initial creation of qdisc on device */ | 317 | /* called during initial creation of qdisc on device */ |
318 | static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt) | 318 | static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt) |
319 | { | 319 | { |
320 | struct ieee80211_sched_data *q = qdisc_priv(qd); | 320 | struct ieee80211_sched_data *q = qdisc_priv(qd); |
321 | struct net_device *dev = qd->dev; | 321 | struct net_device *dev = qd->dev; |
@@ -370,10 +370,10 @@ static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb) | |||
370 | struct tc_ieee80211_qopt opt; | 370 | struct tc_ieee80211_qopt opt; |
371 | 371 | ||
372 | memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1); | 372 | memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1); |
373 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 373 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
374 | */ return skb->len; | 374 | */ return skb->len; |
375 | /* | 375 | /* |
376 | rtattr_failure: | 376 | nla_put_failure: |
377 | skb_trim(skb, p - skb->data);*/ | 377 | skb_trim(skb, p - skb->data);*/ |
378 | return -1; | 378 | return -1; |
379 | } | 379 | } |
@@ -444,7 +444,7 @@ static void wme_classop_put(struct Qdisc *q, unsigned long cl) | |||
444 | 444 | ||
445 | 445 | ||
446 | static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent, | 446 | static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent, |
447 | struct rtattr **tca, unsigned long *arg) | 447 | struct nlattr **tca, unsigned long *arg) |
448 | { | 448 | { |
449 | unsigned long cl = *arg; | 449 | unsigned long cl = *arg; |
450 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); | 450 | struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); |
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index c5ba525dc32f..24c73ba31eaa 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/rtnetlink.h> | 12 | #include <linux/rtnetlink.h> |
13 | #include <linux/random.h> | 13 | #include <linux/random.h> |
14 | #include <net/gen_stats.h> | 14 | #include <net/gen_stats.h> |
15 | #include <net/netlink.h> | ||
15 | 16 | ||
16 | #include <linux/netfilter/x_tables.h> | 17 | #include <linux/netfilter/x_tables.h> |
17 | #include <linux/netfilter/xt_RATEEST.h> | 18 | #include <linux/netfilter/xt_RATEEST.h> |
@@ -98,7 +99,7 @@ xt_rateest_tg_checkentry(const char *tablename, | |||
98 | struct xt_rateest_target_info *info = (void *)targinfo; | 99 | struct xt_rateest_target_info *info = (void *)targinfo; |
99 | struct xt_rateest *est; | 100 | struct xt_rateest *est; |
100 | struct { | 101 | struct { |
101 | struct rtattr opt; | 102 | struct nlattr opt; |
102 | struct gnet_estimator est; | 103 | struct gnet_estimator est; |
103 | } cfg; | 104 | } cfg; |
104 | 105 | ||
@@ -128,8 +129,8 @@ xt_rateest_tg_checkentry(const char *tablename, | |||
128 | est->params.interval = info->interval; | 129 | est->params.interval = info->interval; |
129 | est->params.ewma_log = info->ewma_log; | 130 | est->params.ewma_log = info->ewma_log; |
130 | 131 | ||
131 | cfg.opt.rta_len = RTA_LENGTH(sizeof(cfg.est)); | 132 | cfg.opt.nla_len = nla_attr_size(sizeof(cfg.est)); |
132 | cfg.opt.rta_type = TCA_STATS_RATE_EST; | 133 | cfg.opt.nla_type = TCA_STATS_RATE_EST; |
133 | cfg.est.interval = info->interval; | 134 | cfg.est.interval = info->interval; |
134 | cfg.est.ewma_log = info->ewma_log; | 135 | cfg.est.ewma_log = info->ewma_log; |
135 | 136 | ||
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3825508fdcd1..11f3097a6912 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -227,7 +227,7 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti | |||
227 | p->tcfc_tm.lastuse = jiffies; | 227 | p->tcfc_tm.lastuse = jiffies; |
228 | if (est) | 228 | if (est) |
229 | gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, | 229 | gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, |
230 | &p->tcfc_lock, est); | 230 | &p->tcfc_lock, (struct nlattr *)est); |
231 | a->priv = (void *) p; | 231 | a->priv = (void *) p; |
232 | return p; | 232 | return p; |
233 | } | 233 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a73e3e6d87ea..07ffdf9c5e59 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -174,12 +174,12 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est, | |||
174 | override: | 174 | override: |
175 | if (parm->rate.rate) { | 175 | if (parm->rate.rate) { |
176 | err = -ENOMEM; | 176 | err = -ENOMEM; |
177 | R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); | 177 | R_tab = qdisc_get_rtab(&parm->rate, (struct nlattr *)tb[TCA_POLICE_RATE-1]); |
178 | if (R_tab == NULL) | 178 | if (R_tab == NULL) |
179 | goto failure; | 179 | goto failure; |
180 | if (parm->peakrate.rate) { | 180 | if (parm->peakrate.rate) { |
181 | P_tab = qdisc_get_rtab(&parm->peakrate, | 181 | P_tab = qdisc_get_rtab(&parm->peakrate, |
182 | tb[TCA_POLICE_PEAKRATE-1]); | 182 | (struct nlattr *)tb[TCA_POLICE_PEAKRATE-1]); |
183 | if (P_tab == NULL) { | 183 | if (P_tab == NULL) { |
184 | qdisc_put_rtab(R_tab); | 184 | qdisc_put_rtab(R_tab); |
185 | goto failure; | 185 | goto failure; |
@@ -216,7 +216,7 @@ override: | |||
216 | if (est) | 216 | if (est) |
217 | gen_replace_estimator(&police->tcf_bstats, | 217 | gen_replace_estimator(&police->tcf_bstats, |
218 | &police->tcf_rate_est, | 218 | &police->tcf_rate_est, |
219 | &police->tcf_lock, est); | 219 | &police->tcf_lock, (struct nlattr *)est); |
220 | 220 | ||
221 | spin_unlock_bh(&police->tcf_lock); | 221 | spin_unlock_bh(&police->tcf_lock); |
222 | if (ret != ACT_P_CREATED) | 222 | if (ret != ACT_P_CREATED) |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index dc89a9343f30..7abb028dd96b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -213,14 +213,14 @@ static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) | |||
213 | 213 | ||
214 | /* Find queueing discipline by name */ | 214 | /* Find queueing discipline by name */ |
215 | 215 | ||
216 | static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind) | 216 | static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) |
217 | { | 217 | { |
218 | struct Qdisc_ops *q = NULL; | 218 | struct Qdisc_ops *q = NULL; |
219 | 219 | ||
220 | if (kind) { | 220 | if (kind) { |
221 | read_lock(&qdisc_mod_lock); | 221 | read_lock(&qdisc_mod_lock); |
222 | for (q = qdisc_base; q; q = q->next) { | 222 | for (q = qdisc_base; q; q = q->next) { |
223 | if (rtattr_strcmp(kind, q->id) == 0) { | 223 | if (nla_strcmp(kind, q->id) == 0) { |
224 | if (!try_module_get(q->owner)) | 224 | if (!try_module_get(q->owner)) |
225 | q = NULL; | 225 | q = NULL; |
226 | break; | 226 | break; |
@@ -233,7 +233,7 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind) | |||
233 | 233 | ||
234 | static struct qdisc_rate_table *qdisc_rtab_list; | 234 | static struct qdisc_rate_table *qdisc_rtab_list; |
235 | 235 | ||
236 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab) | 236 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) |
237 | { | 237 | { |
238 | struct qdisc_rate_table *rtab; | 238 | struct qdisc_rate_table *rtab; |
239 | 239 | ||
@@ -244,14 +244,14 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *ta | |||
244 | } | 244 | } |
245 | } | 245 | } |
246 | 246 | ||
247 | if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024) | 247 | if (tab == NULL || r->rate == 0 || r->cell_log == 0 || nla_len(tab) != 1024) |
248 | return NULL; | 248 | return NULL; |
249 | 249 | ||
250 | rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); | 250 | rtab = kmalloc(sizeof(*rtab), GFP_KERNEL); |
251 | if (rtab) { | 251 | if (rtab) { |
252 | rtab->rate = *r; | 252 | rtab->rate = *r; |
253 | rtab->refcnt = 1; | 253 | rtab->refcnt = 1; |
254 | memcpy(rtab->data, RTA_DATA(tab), 1024); | 254 | memcpy(rtab->data, nla_data(tab), 1024); |
255 | rtab->next = qdisc_rtab_list; | 255 | rtab->next = qdisc_rtab_list; |
256 | qdisc_rtab_list = rtab; | 256 | qdisc_rtab_list = rtab; |
257 | } | 257 | } |
@@ -445,10 +445,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
445 | 445 | ||
446 | static struct Qdisc * | 446 | static struct Qdisc * |
447 | qdisc_create(struct net_device *dev, u32 parent, u32 handle, | 447 | qdisc_create(struct net_device *dev, u32 parent, u32 handle, |
448 | struct rtattr **tca, int *errp) | 448 | struct nlattr **tca, int *errp) |
449 | { | 449 | { |
450 | int err; | 450 | int err; |
451 | struct rtattr *kind = tca[TCA_KIND-1]; | 451 | struct nlattr *kind = tca[TCA_KIND]; |
452 | struct Qdisc *sch; | 452 | struct Qdisc *sch; |
453 | struct Qdisc_ops *ops; | 453 | struct Qdisc_ops *ops; |
454 | 454 | ||
@@ -456,7 +456,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
456 | #ifdef CONFIG_KMOD | 456 | #ifdef CONFIG_KMOD |
457 | if (ops == NULL && kind != NULL) { | 457 | if (ops == NULL && kind != NULL) { |
458 | char name[IFNAMSIZ]; | 458 | char name[IFNAMSIZ]; |
459 | if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { | 459 | if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) { |
460 | /* We dropped the RTNL semaphore in order to | 460 | /* We dropped the RTNL semaphore in order to |
461 | * perform the module load. So, even if we | 461 | * perform the module load. So, even if we |
462 | * succeeded in loading the module we have to | 462 | * succeeded in loading the module we have to |
@@ -509,11 +509,11 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle, | |||
509 | 509 | ||
510 | sch->handle = handle; | 510 | sch->handle = handle; |
511 | 511 | ||
512 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { | 512 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { |
513 | if (tca[TCA_RATE-1]) { | 513 | if (tca[TCA_RATE]) { |
514 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, | 514 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, |
515 | sch->stats_lock, | 515 | sch->stats_lock, |
516 | tca[TCA_RATE-1]); | 516 | tca[TCA_RATE]); |
517 | if (err) { | 517 | if (err) { |
518 | /* | 518 | /* |
519 | * Any broken qdiscs that would require | 519 | * Any broken qdiscs that would require |
@@ -541,20 +541,20 @@ err_out: | |||
541 | return NULL; | 541 | return NULL; |
542 | } | 542 | } |
543 | 543 | ||
544 | static int qdisc_change(struct Qdisc *sch, struct rtattr **tca) | 544 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) |
545 | { | 545 | { |
546 | if (tca[TCA_OPTIONS-1]) { | 546 | if (tca[TCA_OPTIONS]) { |
547 | int err; | 547 | int err; |
548 | 548 | ||
549 | if (sch->ops->change == NULL) | 549 | if (sch->ops->change == NULL) |
550 | return -EINVAL; | 550 | return -EINVAL; |
551 | err = sch->ops->change(sch, tca[TCA_OPTIONS-1]); | 551 | err = sch->ops->change(sch, tca[TCA_OPTIONS]); |
552 | if (err) | 552 | if (err) |
553 | return err; | 553 | return err; |
554 | } | 554 | } |
555 | if (tca[TCA_RATE-1]) | 555 | if (tca[TCA_RATE]) |
556 | gen_replace_estimator(&sch->bstats, &sch->rate_est, | 556 | gen_replace_estimator(&sch->bstats, &sch->rate_est, |
557 | sch->stats_lock, tca[TCA_RATE-1]); | 557 | sch->stats_lock, tca[TCA_RATE]); |
558 | return 0; | 558 | return 0; |
559 | } | 559 | } |
560 | 560 | ||
@@ -606,7 +606,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
606 | { | 606 | { |
607 | struct net *net = skb->sk->sk_net; | 607 | struct net *net = skb->sk->sk_net; |
608 | struct tcmsg *tcm = NLMSG_DATA(n); | 608 | struct tcmsg *tcm = NLMSG_DATA(n); |
609 | struct rtattr **tca = arg; | 609 | struct nlattr *tca[TCA_MAX + 1]; |
610 | struct net_device *dev; | 610 | struct net_device *dev; |
611 | u32 clid = tcm->tcm_parent; | 611 | u32 clid = tcm->tcm_parent; |
612 | struct Qdisc *q = NULL; | 612 | struct Qdisc *q = NULL; |
@@ -619,6 +619,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
619 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 619 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
620 | return -ENODEV; | 620 | return -ENODEV; |
621 | 621 | ||
622 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | ||
623 | if (err < 0) | ||
624 | return err; | ||
625 | |||
622 | if (clid) { | 626 | if (clid) { |
623 | if (clid != TC_H_ROOT) { | 627 | if (clid != TC_H_ROOT) { |
624 | if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { | 628 | if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { |
@@ -641,7 +645,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
641 | return -ENOENT; | 645 | return -ENOENT; |
642 | } | 646 | } |
643 | 647 | ||
644 | if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) | 648 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
645 | return -EINVAL; | 649 | return -EINVAL; |
646 | 650 | ||
647 | if (n->nlmsg_type == RTM_DELQDISC) { | 651 | if (n->nlmsg_type == RTM_DELQDISC) { |
@@ -671,7 +675,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
671 | { | 675 | { |
672 | struct net *net = skb->sk->sk_net; | 676 | struct net *net = skb->sk->sk_net; |
673 | struct tcmsg *tcm; | 677 | struct tcmsg *tcm; |
674 | struct rtattr **tca; | 678 | struct nlattr *tca[TCA_MAX + 1]; |
675 | struct net_device *dev; | 679 | struct net_device *dev; |
676 | u32 clid; | 680 | u32 clid; |
677 | struct Qdisc *q, *p; | 681 | struct Qdisc *q, *p; |
@@ -683,13 +687,16 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
683 | replay: | 687 | replay: |
684 | /* Reinit, just in case something touches this. */ | 688 | /* Reinit, just in case something touches this. */ |
685 | tcm = NLMSG_DATA(n); | 689 | tcm = NLMSG_DATA(n); |
686 | tca = arg; | ||
687 | clid = tcm->tcm_parent; | 690 | clid = tcm->tcm_parent; |
688 | q = p = NULL; | 691 | q = p = NULL; |
689 | 692 | ||
690 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 693 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
691 | return -ENODEV; | 694 | return -ENODEV; |
692 | 695 | ||
696 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | ||
697 | if (err < 0) | ||
698 | return err; | ||
699 | |||
693 | if (clid) { | 700 | if (clid) { |
694 | if (clid != TC_H_ROOT) { | 701 | if (clid != TC_H_ROOT) { |
695 | if (clid != TC_H_INGRESS) { | 702 | if (clid != TC_H_INGRESS) { |
@@ -717,7 +724,7 @@ replay: | |||
717 | goto create_n_graft; | 724 | goto create_n_graft; |
718 | if (n->nlmsg_flags&NLM_F_EXCL) | 725 | if (n->nlmsg_flags&NLM_F_EXCL) |
719 | return -EEXIST; | 726 | return -EEXIST; |
720 | if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) | 727 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
721 | return -EINVAL; | 728 | return -EINVAL; |
722 | if (q == p || | 729 | if (q == p || |
723 | (p && check_loop(q, p, 0))) | 730 | (p && check_loop(q, p, 0))) |
@@ -750,8 +757,8 @@ replay: | |||
750 | if ((n->nlmsg_flags&NLM_F_CREATE) && | 757 | if ((n->nlmsg_flags&NLM_F_CREATE) && |
751 | (n->nlmsg_flags&NLM_F_REPLACE) && | 758 | (n->nlmsg_flags&NLM_F_REPLACE) && |
752 | ((n->nlmsg_flags&NLM_F_EXCL) || | 759 | ((n->nlmsg_flags&NLM_F_EXCL) || |
753 | (tca[TCA_KIND-1] && | 760 | (tca[TCA_KIND] && |
754 | rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)))) | 761 | nla_strcmp(tca[TCA_KIND], q->ops->id)))) |
755 | goto create_n_graft; | 762 | goto create_n_graft; |
756 | } | 763 | } |
757 | } | 764 | } |
@@ -766,7 +773,7 @@ replay: | |||
766 | return -ENOENT; | 773 | return -ENOENT; |
767 | if (n->nlmsg_flags&NLM_F_EXCL) | 774 | if (n->nlmsg_flags&NLM_F_EXCL) |
768 | return -EEXIST; | 775 | return -EEXIST; |
769 | if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id)) | 776 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
770 | return -EINVAL; | 777 | return -EINVAL; |
771 | err = qdisc_change(q, tca); | 778 | err = qdisc_change(q, tca); |
772 | if (err == 0) | 779 | if (err == 0) |
@@ -827,31 +834,31 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
827 | tcm->tcm_parent = clid; | 834 | tcm->tcm_parent = clid; |
828 | tcm->tcm_handle = q->handle; | 835 | tcm->tcm_handle = q->handle; |
829 | tcm->tcm_info = atomic_read(&q->refcnt); | 836 | tcm->tcm_info = atomic_read(&q->refcnt); |
830 | RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); | 837 | NLA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); |
831 | if (q->ops->dump && q->ops->dump(q, skb) < 0) | 838 | if (q->ops->dump && q->ops->dump(q, skb) < 0) |
832 | goto rtattr_failure; | 839 | goto nla_put_failure; |
833 | q->qstats.qlen = q->q.qlen; | 840 | q->qstats.qlen = q->q.qlen; |
834 | 841 | ||
835 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, | 842 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, |
836 | TCA_XSTATS, q->stats_lock, &d) < 0) | 843 | TCA_XSTATS, q->stats_lock, &d) < 0) |
837 | goto rtattr_failure; | 844 | goto nla_put_failure; |
838 | 845 | ||
839 | if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) | 846 | if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) |
840 | goto rtattr_failure; | 847 | goto nla_put_failure; |
841 | 848 | ||
842 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || | 849 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || |
843 | gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || | 850 | gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || |
844 | gnet_stats_copy_queue(&d, &q->qstats) < 0) | 851 | gnet_stats_copy_queue(&d, &q->qstats) < 0) |
845 | goto rtattr_failure; | 852 | goto nla_put_failure; |
846 | 853 | ||
847 | if (gnet_stats_finish_copy(&d) < 0) | 854 | if (gnet_stats_finish_copy(&d) < 0) |
848 | goto rtattr_failure; | 855 | goto nla_put_failure; |
849 | 856 | ||
850 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 857 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
851 | return skb->len; | 858 | return skb->len; |
852 | 859 | ||
853 | nlmsg_failure: | 860 | nlmsg_failure: |
854 | rtattr_failure: | 861 | nla_put_failure: |
855 | nlmsg_trim(skb, b); | 862 | nlmsg_trim(skb, b); |
856 | return -1; | 863 | return -1; |
857 | } | 864 | } |
@@ -939,7 +946,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
939 | { | 946 | { |
940 | struct net *net = skb->sk->sk_net; | 947 | struct net *net = skb->sk->sk_net; |
941 | struct tcmsg *tcm = NLMSG_DATA(n); | 948 | struct tcmsg *tcm = NLMSG_DATA(n); |
942 | struct rtattr **tca = arg; | 949 | struct nlattr *tca[TCA_MAX + 1]; |
943 | struct net_device *dev; | 950 | struct net_device *dev; |
944 | struct Qdisc *q = NULL; | 951 | struct Qdisc *q = NULL; |
945 | const struct Qdisc_class_ops *cops; | 952 | const struct Qdisc_class_ops *cops; |
@@ -956,6 +963,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
956 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 963 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
957 | return -ENODEV; | 964 | return -ENODEV; |
958 | 965 | ||
966 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | ||
967 | if (err < 0) | ||
968 | return err; | ||
969 | |||
959 | /* | 970 | /* |
960 | parent == TC_H_UNSPEC - unspecified parent. | 971 | parent == TC_H_UNSPEC - unspecified parent. |
961 | parent == TC_H_ROOT - class is root, which has no parent. | 972 | parent == TC_H_ROOT - class is root, which has no parent. |
@@ -1069,25 +1080,25 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, | |||
1069 | tcm->tcm_parent = q->handle; | 1080 | tcm->tcm_parent = q->handle; |
1070 | tcm->tcm_handle = q->handle; | 1081 | tcm->tcm_handle = q->handle; |
1071 | tcm->tcm_info = 0; | 1082 | tcm->tcm_info = 0; |
1072 | RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); | 1083 | NLA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id); |
1073 | if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) | 1084 | if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) |
1074 | goto rtattr_failure; | 1085 | goto nla_put_failure; |
1075 | 1086 | ||
1076 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, | 1087 | if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, |
1077 | TCA_XSTATS, q->stats_lock, &d) < 0) | 1088 | TCA_XSTATS, q->stats_lock, &d) < 0) |
1078 | goto rtattr_failure; | 1089 | goto nla_put_failure; |
1079 | 1090 | ||
1080 | if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) | 1091 | if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) |
1081 | goto rtattr_failure; | 1092 | goto nla_put_failure; |
1082 | 1093 | ||
1083 | if (gnet_stats_finish_copy(&d) < 0) | 1094 | if (gnet_stats_finish_copy(&d) < 0) |
1084 | goto rtattr_failure; | 1095 | goto nla_put_failure; |
1085 | 1096 | ||
1086 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 1097 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
1087 | return skb->len; | 1098 | return skb->len; |
1088 | 1099 | ||
1089 | nlmsg_failure: | 1100 | nlmsg_failure: |
1090 | rtattr_failure: | 1101 | nla_put_failure: |
1091 | nlmsg_trim(skb, b); | 1102 | nlmsg_trim(skb, b); |
1092 | return -1; | 1103 | return -1; |
1093 | } | 1104 | } |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 734be9d37d46..eb01aae117df 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -196,13 +196,13 @@ static const u8 llc_oui_ip[] = { | |||
196 | }; /* Ethertype IP (0800) */ | 196 | }; /* Ethertype IP (0800) */ |
197 | 197 | ||
198 | static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | 198 | static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, |
199 | struct rtattr **tca, unsigned long *arg) | 199 | struct nlattr **tca, unsigned long *arg) |
200 | { | 200 | { |
201 | struct atm_qdisc_data *p = qdisc_priv(sch); | 201 | struct atm_qdisc_data *p = qdisc_priv(sch); |
202 | struct atm_flow_data *flow = (struct atm_flow_data *)*arg; | 202 | struct atm_flow_data *flow = (struct atm_flow_data *)*arg; |
203 | struct atm_flow_data *excess = NULL; | 203 | struct atm_flow_data *excess = NULL; |
204 | struct rtattr *opt = tca[TCA_OPTIONS - 1]; | 204 | struct nlattr *opt = tca[TCA_OPTIONS]; |
205 | struct rtattr *tb[TCA_ATM_MAX]; | 205 | struct nlattr *tb[TCA_ATM_MAX + 1]; |
206 | struct socket *sock; | 206 | struct socket *sock; |
207 | int fd, error, hdr_len; | 207 | int fd, error, hdr_len; |
208 | void *hdr; | 208 | void *hdr; |
@@ -223,31 +223,31 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
223 | */ | 223 | */ |
224 | if (flow) | 224 | if (flow) |
225 | return -EBUSY; | 225 | return -EBUSY; |
226 | if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt)) | 226 | if (opt == NULL || nla_parse_nested(tb, TCA_ATM_MAX, opt, NULL)) |
227 | return -EINVAL; | 227 | return -EINVAL; |
228 | if (!tb[TCA_ATM_FD - 1] || RTA_PAYLOAD(tb[TCA_ATM_FD - 1]) < sizeof(fd)) | 228 | if (!tb[TCA_ATM_FD] || nla_len(tb[TCA_ATM_FD]) < sizeof(fd)) |
229 | return -EINVAL; | 229 | return -EINVAL; |
230 | fd = *(int *)RTA_DATA(tb[TCA_ATM_FD - 1]); | 230 | fd = *(int *)nla_data(tb[TCA_ATM_FD]); |
231 | pr_debug("atm_tc_change: fd %d\n", fd); | 231 | pr_debug("atm_tc_change: fd %d\n", fd); |
232 | if (tb[TCA_ATM_HDR - 1]) { | 232 | if (tb[TCA_ATM_HDR]) { |
233 | hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR - 1]); | 233 | hdr_len = nla_len(tb[TCA_ATM_HDR]); |
234 | hdr = RTA_DATA(tb[TCA_ATM_HDR - 1]); | 234 | hdr = nla_data(tb[TCA_ATM_HDR]); |
235 | } else { | 235 | } else { |
236 | hdr_len = RFC1483LLC_LEN; | 236 | hdr_len = RFC1483LLC_LEN; |
237 | hdr = NULL; /* default LLC/SNAP for IP */ | 237 | hdr = NULL; /* default LLC/SNAP for IP */ |
238 | } | 238 | } |
239 | if (!tb[TCA_ATM_EXCESS - 1]) | 239 | if (!tb[TCA_ATM_EXCESS]) |
240 | excess = NULL; | 240 | excess = NULL; |
241 | else { | 241 | else { |
242 | if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS - 1]) != sizeof(u32)) | 242 | if (nla_len(tb[TCA_ATM_EXCESS]) != sizeof(u32)) |
243 | return -EINVAL; | 243 | return -EINVAL; |
244 | excess = (struct atm_flow_data *) | 244 | excess = (struct atm_flow_data *) |
245 | atm_tc_get(sch, *(u32 *)RTA_DATA(tb[TCA_ATM_EXCESS - 1])); | 245 | atm_tc_get(sch, *(u32 *)nla_data(tb[TCA_ATM_EXCESS])); |
246 | if (!excess) | 246 | if (!excess) |
247 | return -ENOENT; | 247 | return -ENOENT; |
248 | } | 248 | } |
249 | pr_debug("atm_tc_change: type %d, payload %lu, hdr_len %d\n", | 249 | pr_debug("atm_tc_change: type %d, payload %lu, hdr_len %d\n", |
250 | opt->rta_type, RTA_PAYLOAD(opt), hdr_len); | 250 | opt->nla_type, nla_len(opt), hdr_len); |
251 | sock = sockfd_lookup(fd, &error); | 251 | sock = sockfd_lookup(fd, &error); |
252 | if (!sock) | 252 | if (!sock) |
253 | return error; /* f_count++ */ | 253 | return error; /* f_count++ */ |
@@ -541,7 +541,7 @@ static unsigned int atm_tc_drop(struct Qdisc *sch) | |||
541 | return 0; | 541 | return 0; |
542 | } | 542 | } |
543 | 543 | ||
544 | static int atm_tc_init(struct Qdisc *sch, struct rtattr *opt) | 544 | static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) |
545 | { | 545 | { |
546 | struct atm_qdisc_data *p = qdisc_priv(sch); | 546 | struct atm_qdisc_data *p = qdisc_priv(sch); |
547 | 547 | ||
@@ -602,7 +602,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, | |||
602 | struct atm_qdisc_data *p = qdisc_priv(sch); | 602 | struct atm_qdisc_data *p = qdisc_priv(sch); |
603 | struct atm_flow_data *flow = (struct atm_flow_data *)cl; | 603 | struct atm_flow_data *flow = (struct atm_flow_data *)cl; |
604 | unsigned char *b = skb_tail_pointer(skb); | 604 | unsigned char *b = skb_tail_pointer(skb); |
605 | struct rtattr *rta; | 605 | struct nlattr *nla; |
606 | 606 | ||
607 | pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", | 607 | pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", |
608 | sch, p, flow, skb, tcm); | 608 | sch, p, flow, skb, tcm); |
@@ -610,9 +610,9 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, | |||
610 | return -EINVAL; | 610 | return -EINVAL; |
611 | tcm->tcm_handle = flow->classid; | 611 | tcm->tcm_handle = flow->classid; |
612 | tcm->tcm_info = flow->q->handle; | 612 | tcm->tcm_info = flow->q->handle; |
613 | rta = (struct rtattr *)b; | 613 | nla = (struct nlattr *)b; |
614 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 614 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
615 | RTA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); | 615 | NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); |
616 | if (flow->vcc) { | 616 | if (flow->vcc) { |
617 | struct sockaddr_atmpvc pvc; | 617 | struct sockaddr_atmpvc pvc; |
618 | int state; | 618 | int state; |
@@ -621,21 +621,21 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, | |||
621 | pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; | 621 | pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; |
622 | pvc.sap_addr.vpi = flow->vcc->vpi; | 622 | pvc.sap_addr.vpi = flow->vcc->vpi; |
623 | pvc.sap_addr.vci = flow->vcc->vci; | 623 | pvc.sap_addr.vci = flow->vcc->vci; |
624 | RTA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); | 624 | NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); |
625 | state = ATM_VF2VS(flow->vcc->flags); | 625 | state = ATM_VF2VS(flow->vcc->flags); |
626 | RTA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state); | 626 | NLA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state); |
627 | } | 627 | } |
628 | if (flow->excess) | 628 | if (flow->excess) |
629 | RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid); | 629 | NLA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid); |
630 | else { | 630 | else { |
631 | static u32 zero; | 631 | static u32 zero; |
632 | 632 | ||
633 | RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero); | 633 | NLA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero); |
634 | } | 634 | } |
635 | rta->rta_len = skb_tail_pointer(skb) - b; | 635 | nla->nla_len = skb_tail_pointer(skb) - b; |
636 | return skb->len; | 636 | return skb->len; |
637 | 637 | ||
638 | rtattr_failure: | 638 | nla_put_failure: |
639 | nlmsg_trim(skb, b); | 639 | nlmsg_trim(skb, b); |
640 | return -1; | 640 | return -1; |
641 | } | 641 | } |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index bea123fc24a4..5c8667ef4ba7 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1377,24 +1377,24 @@ static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) | |||
1377 | return 0; | 1377 | return 0; |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | static int cbq_init(struct Qdisc *sch, struct rtattr *opt) | 1380 | static int cbq_init(struct Qdisc *sch, struct nlattr *opt) |
1381 | { | 1381 | { |
1382 | struct cbq_sched_data *q = qdisc_priv(sch); | 1382 | struct cbq_sched_data *q = qdisc_priv(sch); |
1383 | struct rtattr *tb[TCA_CBQ_MAX]; | 1383 | struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1384 | struct tc_ratespec *r; | 1384 | struct tc_ratespec *r; |
1385 | 1385 | ||
1386 | if (rtattr_parse_nested(tb, TCA_CBQ_MAX, opt) < 0 || | 1386 | if (nla_parse_nested(tb, TCA_CBQ_MAX, opt, NULL) < 0 || |
1387 | tb[TCA_CBQ_RTAB-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL || | 1387 | tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL || |
1388 | RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec)) | 1388 | nla_len(tb[TCA_CBQ_RATE]) < sizeof(struct tc_ratespec)) |
1389 | return -EINVAL; | 1389 | return -EINVAL; |
1390 | 1390 | ||
1391 | if (tb[TCA_CBQ_LSSOPT-1] && | 1391 | if (tb[TCA_CBQ_LSSOPT] && |
1392 | RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt)) | 1392 | nla_len(tb[TCA_CBQ_LSSOPT]) < sizeof(struct tc_cbq_lssopt)) |
1393 | return -EINVAL; | 1393 | return -EINVAL; |
1394 | 1394 | ||
1395 | r = RTA_DATA(tb[TCA_CBQ_RATE-1]); | 1395 | r = nla_data(tb[TCA_CBQ_RATE]); |
1396 | 1396 | ||
1397 | if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB-1])) == NULL) | 1397 | if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) |
1398 | return -EINVAL; | 1398 | return -EINVAL; |
1399 | 1399 | ||
1400 | q->link.refcnt = 1; | 1400 | q->link.refcnt = 1; |
@@ -1427,8 +1427,8 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt) | |||
1427 | 1427 | ||
1428 | cbq_link_class(&q->link); | 1428 | cbq_link_class(&q->link); |
1429 | 1429 | ||
1430 | if (tb[TCA_CBQ_LSSOPT-1]) | 1430 | if (tb[TCA_CBQ_LSSOPT]) |
1431 | cbq_set_lss(&q->link, RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); | 1431 | cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); |
1432 | 1432 | ||
1433 | cbq_addprio(q, &q->link); | 1433 | cbq_addprio(q, &q->link); |
1434 | return 0; | 1434 | return 0; |
@@ -1438,10 +1438,10 @@ static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) | |||
1438 | { | 1438 | { |
1439 | unsigned char *b = skb_tail_pointer(skb); | 1439 | unsigned char *b = skb_tail_pointer(skb); |
1440 | 1440 | ||
1441 | RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); | 1441 | NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); |
1442 | return skb->len; | 1442 | return skb->len; |
1443 | 1443 | ||
1444 | rtattr_failure: | 1444 | nla_put_failure: |
1445 | nlmsg_trim(skb, b); | 1445 | nlmsg_trim(skb, b); |
1446 | return -1; | 1446 | return -1; |
1447 | } | 1447 | } |
@@ -1463,10 +1463,10 @@ static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) | |||
1463 | opt.minidle = (u32)(-cl->minidle); | 1463 | opt.minidle = (u32)(-cl->minidle); |
1464 | opt.offtime = cl->offtime; | 1464 | opt.offtime = cl->offtime; |
1465 | opt.change = ~0; | 1465 | opt.change = ~0; |
1466 | RTA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); | 1466 | NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); |
1467 | return skb->len; | 1467 | return skb->len; |
1468 | 1468 | ||
1469 | rtattr_failure: | 1469 | nla_put_failure: |
1470 | nlmsg_trim(skb, b); | 1470 | nlmsg_trim(skb, b); |
1471 | return -1; | 1471 | return -1; |
1472 | } | 1472 | } |
@@ -1481,10 +1481,10 @@ static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) | |||
1481 | opt.priority = cl->priority+1; | 1481 | opt.priority = cl->priority+1; |
1482 | opt.cpriority = cl->cpriority+1; | 1482 | opt.cpriority = cl->cpriority+1; |
1483 | opt.weight = cl->weight; | 1483 | opt.weight = cl->weight; |
1484 | RTA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); | 1484 | NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); |
1485 | return skb->len; | 1485 | return skb->len; |
1486 | 1486 | ||
1487 | rtattr_failure: | 1487 | nla_put_failure: |
1488 | nlmsg_trim(skb, b); | 1488 | nlmsg_trim(skb, b); |
1489 | return -1; | 1489 | return -1; |
1490 | } | 1490 | } |
@@ -1498,10 +1498,10 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) | |||
1498 | opt.priority2 = cl->priority2+1; | 1498 | opt.priority2 = cl->priority2+1; |
1499 | opt.pad = 0; | 1499 | opt.pad = 0; |
1500 | opt.penalty = cl->penalty; | 1500 | opt.penalty = cl->penalty; |
1501 | RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); | 1501 | NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); |
1502 | return skb->len; | 1502 | return skb->len; |
1503 | 1503 | ||
1504 | rtattr_failure: | 1504 | nla_put_failure: |
1505 | nlmsg_trim(skb, b); | 1505 | nlmsg_trim(skb, b); |
1506 | return -1; | 1506 | return -1; |
1507 | } | 1507 | } |
@@ -1515,11 +1515,11 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) | |||
1515 | opt.split = cl->split ? cl->split->classid : 0; | 1515 | opt.split = cl->split ? cl->split->classid : 0; |
1516 | opt.defmap = cl->defmap; | 1516 | opt.defmap = cl->defmap; |
1517 | opt.defchange = ~0; | 1517 | opt.defchange = ~0; |
1518 | RTA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); | 1518 | NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); |
1519 | } | 1519 | } |
1520 | return skb->len; | 1520 | return skb->len; |
1521 | 1521 | ||
1522 | rtattr_failure: | 1522 | nla_put_failure: |
1523 | nlmsg_trim(skb, b); | 1523 | nlmsg_trim(skb, b); |
1524 | return -1; | 1524 | return -1; |
1525 | } | 1525 | } |
@@ -1534,11 +1534,11 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) | |||
1534 | opt.police = cl->police; | 1534 | opt.police = cl->police; |
1535 | opt.__res1 = 0; | 1535 | opt.__res1 = 0; |
1536 | opt.__res2 = 0; | 1536 | opt.__res2 = 0; |
1537 | RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); | 1537 | NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); |
1538 | } | 1538 | } |
1539 | return skb->len; | 1539 | return skb->len; |
1540 | 1540 | ||
1541 | rtattr_failure: | 1541 | nla_put_failure: |
1542 | nlmsg_trim(skb, b); | 1542 | nlmsg_trim(skb, b); |
1543 | return -1; | 1543 | return -1; |
1544 | } | 1544 | } |
@@ -1562,16 +1562,16 @@ static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
1562 | { | 1562 | { |
1563 | struct cbq_sched_data *q = qdisc_priv(sch); | 1563 | struct cbq_sched_data *q = qdisc_priv(sch); |
1564 | unsigned char *b = skb_tail_pointer(skb); | 1564 | unsigned char *b = skb_tail_pointer(skb); |
1565 | struct rtattr *rta; | 1565 | struct nlattr *nla; |
1566 | 1566 | ||
1567 | rta = (struct rtattr*)b; | 1567 | nla = (struct nlattr*)b; |
1568 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 1568 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
1569 | if (cbq_dump_attr(skb, &q->link) < 0) | 1569 | if (cbq_dump_attr(skb, &q->link) < 0) |
1570 | goto rtattr_failure; | 1570 | goto nla_put_failure; |
1571 | rta->rta_len = skb_tail_pointer(skb) - b; | 1571 | nla->nla_len = skb_tail_pointer(skb) - b; |
1572 | return skb->len; | 1572 | return skb->len; |
1573 | 1573 | ||
1574 | rtattr_failure: | 1574 | nla_put_failure: |
1575 | nlmsg_trim(skb, b); | 1575 | nlmsg_trim(skb, b); |
1576 | return -1; | 1576 | return -1; |
1577 | } | 1577 | } |
@@ -1591,7 +1591,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1591 | { | 1591 | { |
1592 | struct cbq_class *cl = (struct cbq_class*)arg; | 1592 | struct cbq_class *cl = (struct cbq_class*)arg; |
1593 | unsigned char *b = skb_tail_pointer(skb); | 1593 | unsigned char *b = skb_tail_pointer(skb); |
1594 | struct rtattr *rta; | 1594 | struct nlattr *nla; |
1595 | 1595 | ||
1596 | if (cl->tparent) | 1596 | if (cl->tparent) |
1597 | tcm->tcm_parent = cl->tparent->classid; | 1597 | tcm->tcm_parent = cl->tparent->classid; |
@@ -1600,14 +1600,14 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1600 | tcm->tcm_handle = cl->classid; | 1600 | tcm->tcm_handle = cl->classid; |
1601 | tcm->tcm_info = cl->q->handle; | 1601 | tcm->tcm_info = cl->q->handle; |
1602 | 1602 | ||
1603 | rta = (struct rtattr*)b; | 1603 | nla = (struct nlattr*)b; |
1604 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 1604 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
1605 | if (cbq_dump_attr(skb, cl) < 0) | 1605 | if (cbq_dump_attr(skb, cl) < 0) |
1606 | goto rtattr_failure; | 1606 | goto nla_put_failure; |
1607 | rta->rta_len = skb_tail_pointer(skb) - b; | 1607 | nla->nla_len = skb_tail_pointer(skb) - b; |
1608 | return skb->len; | 1608 | return skb->len; |
1609 | 1609 | ||
1610 | rtattr_failure: | 1610 | nla_put_failure: |
1611 | nlmsg_trim(skb, b); | 1611 | nlmsg_trim(skb, b); |
1612 | return -1; | 1612 | return -1; |
1613 | } | 1613 | } |
@@ -1753,43 +1753,43 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg) | |||
1753 | } | 1753 | } |
1754 | 1754 | ||
1755 | static int | 1755 | static int |
1756 | cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca, | 1756 | cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, |
1757 | unsigned long *arg) | 1757 | unsigned long *arg) |
1758 | { | 1758 | { |
1759 | int err; | 1759 | int err; |
1760 | struct cbq_sched_data *q = qdisc_priv(sch); | 1760 | struct cbq_sched_data *q = qdisc_priv(sch); |
1761 | struct cbq_class *cl = (struct cbq_class*)*arg; | 1761 | struct cbq_class *cl = (struct cbq_class*)*arg; |
1762 | struct rtattr *opt = tca[TCA_OPTIONS-1]; | 1762 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1763 | struct rtattr *tb[TCA_CBQ_MAX]; | 1763 | struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1764 | struct cbq_class *parent; | 1764 | struct cbq_class *parent; |
1765 | struct qdisc_rate_table *rtab = NULL; | 1765 | struct qdisc_rate_table *rtab = NULL; |
1766 | 1766 | ||
1767 | if (opt==NULL || rtattr_parse_nested(tb, TCA_CBQ_MAX, opt)) | 1767 | if (opt==NULL || nla_parse_nested(tb, TCA_CBQ_MAX, opt, NULL)) |
1768 | return -EINVAL; | 1768 | return -EINVAL; |
1769 | 1769 | ||
1770 | if (tb[TCA_CBQ_OVL_STRATEGY-1] && | 1770 | if (tb[TCA_CBQ_OVL_STRATEGY] && |
1771 | RTA_PAYLOAD(tb[TCA_CBQ_OVL_STRATEGY-1]) < sizeof(struct tc_cbq_ovl)) | 1771 | nla_len(tb[TCA_CBQ_OVL_STRATEGY]) < sizeof(struct tc_cbq_ovl)) |
1772 | return -EINVAL; | 1772 | return -EINVAL; |
1773 | 1773 | ||
1774 | if (tb[TCA_CBQ_FOPT-1] && | 1774 | if (tb[TCA_CBQ_FOPT] && |
1775 | RTA_PAYLOAD(tb[TCA_CBQ_FOPT-1]) < sizeof(struct tc_cbq_fopt)) | 1775 | nla_len(tb[TCA_CBQ_FOPT]) < sizeof(struct tc_cbq_fopt)) |
1776 | return -EINVAL; | 1776 | return -EINVAL; |
1777 | 1777 | ||
1778 | if (tb[TCA_CBQ_RATE-1] && | 1778 | if (tb[TCA_CBQ_RATE] && |
1779 | RTA_PAYLOAD(tb[TCA_CBQ_RATE-1]) < sizeof(struct tc_ratespec)) | 1779 | nla_len(tb[TCA_CBQ_RATE]) < sizeof(struct tc_ratespec)) |
1780 | return -EINVAL; | 1780 | return -EINVAL; |
1781 | 1781 | ||
1782 | if (tb[TCA_CBQ_LSSOPT-1] && | 1782 | if (tb[TCA_CBQ_LSSOPT] && |
1783 | RTA_PAYLOAD(tb[TCA_CBQ_LSSOPT-1]) < sizeof(struct tc_cbq_lssopt)) | 1783 | nla_len(tb[TCA_CBQ_LSSOPT]) < sizeof(struct tc_cbq_lssopt)) |
1784 | return -EINVAL; | 1784 | return -EINVAL; |
1785 | 1785 | ||
1786 | if (tb[TCA_CBQ_WRROPT-1] && | 1786 | if (tb[TCA_CBQ_WRROPT] && |
1787 | RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt)) | 1787 | nla_len(tb[TCA_CBQ_WRROPT]) < sizeof(struct tc_cbq_wrropt)) |
1788 | return -EINVAL; | 1788 | return -EINVAL; |
1789 | 1789 | ||
1790 | #ifdef CONFIG_NET_CLS_ACT | 1790 | #ifdef CONFIG_NET_CLS_ACT |
1791 | if (tb[TCA_CBQ_POLICE-1] && | 1791 | if (tb[TCA_CBQ_POLICE] && |
1792 | RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police)) | 1792 | nla_len(tb[TCA_CBQ_POLICE]) < sizeof(struct tc_cbq_police)) |
1793 | return -EINVAL; | 1793 | return -EINVAL; |
1794 | #endif | 1794 | #endif |
1795 | 1795 | ||
@@ -1802,8 +1802,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t | |||
1802 | return -EINVAL; | 1802 | return -EINVAL; |
1803 | } | 1803 | } |
1804 | 1804 | ||
1805 | if (tb[TCA_CBQ_RATE-1]) { | 1805 | if (tb[TCA_CBQ_RATE]) { |
1806 | rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]); | 1806 | rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); |
1807 | if (rtab == NULL) | 1807 | if (rtab == NULL) |
1808 | return -EINVAL; | 1808 | return -EINVAL; |
1809 | } | 1809 | } |
@@ -1819,45 +1819,45 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t | |||
1819 | qdisc_put_rtab(rtab); | 1819 | qdisc_put_rtab(rtab); |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | if (tb[TCA_CBQ_LSSOPT-1]) | 1822 | if (tb[TCA_CBQ_LSSOPT]) |
1823 | cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); | 1823 | cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); |
1824 | 1824 | ||
1825 | if (tb[TCA_CBQ_WRROPT-1]) { | 1825 | if (tb[TCA_CBQ_WRROPT]) { |
1826 | cbq_rmprio(q, cl); | 1826 | cbq_rmprio(q, cl); |
1827 | cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1])); | 1827 | cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | if (tb[TCA_CBQ_OVL_STRATEGY-1]) | 1830 | if (tb[TCA_CBQ_OVL_STRATEGY]) |
1831 | cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); | 1831 | cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); |
1832 | 1832 | ||
1833 | #ifdef CONFIG_NET_CLS_ACT | 1833 | #ifdef CONFIG_NET_CLS_ACT |
1834 | if (tb[TCA_CBQ_POLICE-1]) | 1834 | if (tb[TCA_CBQ_POLICE]) |
1835 | cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); | 1835 | cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); |
1836 | #endif | 1836 | #endif |
1837 | 1837 | ||
1838 | if (tb[TCA_CBQ_FOPT-1]) | 1838 | if (tb[TCA_CBQ_FOPT]) |
1839 | cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1])); | 1839 | cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); |
1840 | 1840 | ||
1841 | if (cl->q->q.qlen) | 1841 | if (cl->q->q.qlen) |
1842 | cbq_activate_class(cl); | 1842 | cbq_activate_class(cl); |
1843 | 1843 | ||
1844 | sch_tree_unlock(sch); | 1844 | sch_tree_unlock(sch); |
1845 | 1845 | ||
1846 | if (tca[TCA_RATE-1]) | 1846 | if (tca[TCA_RATE]) |
1847 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1847 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1848 | &sch->dev->queue_lock, | 1848 | &sch->dev->queue_lock, |
1849 | tca[TCA_RATE-1]); | 1849 | tca[TCA_RATE]); |
1850 | return 0; | 1850 | return 0; |
1851 | } | 1851 | } |
1852 | 1852 | ||
1853 | if (parentid == TC_H_ROOT) | 1853 | if (parentid == TC_H_ROOT) |
1854 | return -EINVAL; | 1854 | return -EINVAL; |
1855 | 1855 | ||
1856 | if (tb[TCA_CBQ_WRROPT-1] == NULL || tb[TCA_CBQ_RATE-1] == NULL || | 1856 | if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL || |
1857 | tb[TCA_CBQ_LSSOPT-1] == NULL) | 1857 | tb[TCA_CBQ_LSSOPT] == NULL) |
1858 | return -EINVAL; | 1858 | return -EINVAL; |
1859 | 1859 | ||
1860 | rtab = qdisc_get_rtab(RTA_DATA(tb[TCA_CBQ_RATE-1]), tb[TCA_CBQ_RTAB-1]); | 1860 | rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); |
1861 | if (rtab == NULL) | 1861 | if (rtab == NULL) |
1862 | return -EINVAL; | 1862 | return -EINVAL; |
1863 | 1863 | ||
@@ -1912,8 +1912,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t | |||
1912 | cl->share = cl->tparent; | 1912 | cl->share = cl->tparent; |
1913 | cbq_adjust_levels(parent); | 1913 | cbq_adjust_levels(parent); |
1914 | cl->minidle = -0x7FFFFFFF; | 1914 | cl->minidle = -0x7FFFFFFF; |
1915 | cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1])); | 1915 | cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); |
1916 | cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1])); | 1916 | cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
1917 | if (cl->ewma_log==0) | 1917 | if (cl->ewma_log==0) |
1918 | cl->ewma_log = q->link.ewma_log; | 1918 | cl->ewma_log = q->link.ewma_log; |
1919 | if (cl->maxidle==0) | 1919 | if (cl->maxidle==0) |
@@ -1921,19 +1921,19 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t | |||
1921 | if (cl->avpkt==0) | 1921 | if (cl->avpkt==0) |
1922 | cl->avpkt = q->link.avpkt; | 1922 | cl->avpkt = q->link.avpkt; |
1923 | cl->overlimit = cbq_ovl_classic; | 1923 | cl->overlimit = cbq_ovl_classic; |
1924 | if (tb[TCA_CBQ_OVL_STRATEGY-1]) | 1924 | if (tb[TCA_CBQ_OVL_STRATEGY]) |
1925 | cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); | 1925 | cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); |
1926 | #ifdef CONFIG_NET_CLS_ACT | 1926 | #ifdef CONFIG_NET_CLS_ACT |
1927 | if (tb[TCA_CBQ_POLICE-1]) | 1927 | if (tb[TCA_CBQ_POLICE]) |
1928 | cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); | 1928 | cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); |
1929 | #endif | 1929 | #endif |
1930 | if (tb[TCA_CBQ_FOPT-1]) | 1930 | if (tb[TCA_CBQ_FOPT]) |
1931 | cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1])); | 1931 | cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); |
1932 | sch_tree_unlock(sch); | 1932 | sch_tree_unlock(sch); |
1933 | 1933 | ||
1934 | if (tca[TCA_RATE-1]) | 1934 | if (tca[TCA_RATE]) |
1935 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1935 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1936 | &sch->dev->queue_lock, tca[TCA_RATE-1]); | 1936 | &sch->dev->queue_lock, tca[TCA_RATE]); |
1937 | 1937 | ||
1938 | *arg = (unsigned long)cl; | 1938 | *arg = (unsigned long)cl; |
1939 | return 0; | 1939 | return 0; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 40e06a6890da..f183ab768873 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -100,11 +100,11 @@ static void dsmark_put(struct Qdisc *sch, unsigned long cl) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, | 102 | static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, |
103 | struct rtattr **tca, unsigned long *arg) | 103 | struct nlattr **tca, unsigned long *arg) |
104 | { | 104 | { |
105 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 105 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
106 | struct rtattr *opt = tca[TCA_OPTIONS-1]; | 106 | struct nlattr *opt = tca[TCA_OPTIONS]; |
107 | struct rtattr *tb[TCA_DSMARK_MAX]; | 107 | struct nlattr *tb[TCA_DSMARK_MAX + 1]; |
108 | int err = -EINVAL; | 108 | int err = -EINVAL; |
109 | u8 mask = 0; | 109 | u8 mask = 0; |
110 | 110 | ||
@@ -113,24 +113,29 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
113 | 113 | ||
114 | if (!dsmark_valid_index(p, *arg)) { | 114 | if (!dsmark_valid_index(p, *arg)) { |
115 | err = -ENOENT; | 115 | err = -ENOENT; |
116 | goto rtattr_failure; | 116 | goto errout; |
117 | } | 117 | } |
118 | 118 | ||
119 | if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt)) | 119 | if (!opt || nla_parse_nested(tb, TCA_DSMARK_MAX, opt, NULL)) |
120 | goto rtattr_failure; | 120 | goto errout; |
121 | |||
122 | if (tb[TCA_DSMARK_MASK-1]) | ||
123 | mask = RTA_GET_U8(tb[TCA_DSMARK_MASK-1]); | ||
124 | 121 | ||
125 | if (tb[TCA_DSMARK_VALUE-1]) | 122 | if (tb[TCA_DSMARK_MASK]) { |
126 | p->value[*arg-1] = RTA_GET_U8(tb[TCA_DSMARK_VALUE-1]); | 123 | if (nla_len(tb[TCA_DSMARK_MASK]) < sizeof(u8)) |
124 | goto errout; | ||
125 | mask = nla_get_u8(tb[TCA_DSMARK_MASK]); | ||
126 | } | ||
127 | if (tb[TCA_DSMARK_VALUE]) { | ||
128 | if (nla_len(tb[TCA_DSMARK_VALUE]) < sizeof(u8)) | ||
129 | goto errout; | ||
130 | p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); | ||
131 | } | ||
127 | 132 | ||
128 | if (tb[TCA_DSMARK_MASK-1]) | 133 | if (tb[TCA_DSMARK_MASK]) |
129 | p->mask[*arg-1] = mask; | 134 | p->mask[*arg-1] = mask; |
130 | 135 | ||
131 | err = 0; | 136 | err = 0; |
132 | 137 | ||
133 | rtattr_failure: | 138 | errout: |
134 | return err; | 139 | return err; |
135 | } | 140 | } |
136 | 141 | ||
@@ -335,10 +340,10 @@ static unsigned int dsmark_drop(struct Qdisc *sch) | |||
335 | return len; | 340 | return len; |
336 | } | 341 | } |
337 | 342 | ||
338 | static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) | 343 | static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) |
339 | { | 344 | { |
340 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 345 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
341 | struct rtattr *tb[TCA_DSMARK_MAX]; | 346 | struct nlattr *tb[TCA_DSMARK_MAX + 1]; |
342 | int err = -EINVAL; | 347 | int err = -EINVAL; |
343 | u32 default_index = NO_DEFAULT_INDEX; | 348 | u32 default_index = NO_DEFAULT_INDEX; |
344 | u16 indices; | 349 | u16 indices; |
@@ -346,16 +351,21 @@ static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) | |||
346 | 351 | ||
347 | pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); | 352 | pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); |
348 | 353 | ||
349 | if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt) < 0) | 354 | if (!opt || nla_parse_nested(tb, TCA_DSMARK_MAX, opt, NULL) < 0) |
350 | goto errout; | 355 | goto errout; |
351 | 356 | ||
352 | indices = RTA_GET_U16(tb[TCA_DSMARK_INDICES-1]); | 357 | if (nla_len(tb[TCA_DSMARK_INDICES]) < sizeof(u16)) |
358 | goto errout; | ||
359 | indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); | ||
353 | 360 | ||
354 | if (hweight32(indices) != 1) | 361 | if (hweight32(indices) != 1) |
355 | goto errout; | 362 | goto errout; |
356 | 363 | ||
357 | if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) | 364 | if (tb[TCA_DSMARK_DEFAULT_INDEX]) { |
358 | default_index = RTA_GET_U16(tb[TCA_DSMARK_DEFAULT_INDEX-1]); | 365 | if (nla_len(tb[TCA_DSMARK_DEFAULT_INDEX]) < sizeof(u16)) |
366 | goto errout; | ||
367 | default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]); | ||
368 | } | ||
359 | 369 | ||
360 | mask = kmalloc(indices * 2, GFP_KERNEL); | 370 | mask = kmalloc(indices * 2, GFP_KERNEL); |
361 | if (mask == NULL) { | 371 | if (mask == NULL) { |
@@ -371,7 +381,7 @@ static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) | |||
371 | 381 | ||
372 | p->indices = indices; | 382 | p->indices = indices; |
373 | p->default_index = default_index; | 383 | p->default_index = default_index; |
374 | p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]); | 384 | p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); |
375 | 385 | ||
376 | p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); | 386 | p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); |
377 | if (p->q == NULL) | 387 | if (p->q == NULL) |
@@ -381,7 +391,6 @@ static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) | |||
381 | 391 | ||
382 | err = 0; | 392 | err = 0; |
383 | errout: | 393 | errout: |
384 | rtattr_failure: | ||
385 | return err; | 394 | return err; |
386 | } | 395 | } |
387 | 396 | ||
@@ -409,7 +418,7 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, | |||
409 | struct sk_buff *skb, struct tcmsg *tcm) | 418 | struct sk_buff *skb, struct tcmsg *tcm) |
410 | { | 419 | { |
411 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 420 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
412 | struct rtattr *opts = NULL; | 421 | struct nlattr *opts = NULL; |
413 | 422 | ||
414 | pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n", sch, p, cl); | 423 | pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n", sch, p, cl); |
415 | 424 | ||
@@ -419,34 +428,38 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl, | |||
419 | tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); | 428 | tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); |
420 | tcm->tcm_info = p->q->handle; | 429 | tcm->tcm_info = p->q->handle; |
421 | 430 | ||
422 | opts = RTA_NEST(skb, TCA_OPTIONS); | 431 | opts = nla_nest_start(skb, TCA_OPTIONS); |
423 | RTA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); | 432 | if (opts == NULL) |
424 | RTA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); | 433 | goto nla_put_failure; |
434 | NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); | ||
435 | NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); | ||
425 | 436 | ||
426 | return RTA_NEST_END(skb, opts); | 437 | return nla_nest_end(skb, opts); |
427 | 438 | ||
428 | rtattr_failure: | 439 | nla_put_failure: |
429 | return RTA_NEST_CANCEL(skb, opts); | 440 | return nla_nest_cancel(skb, opts); |
430 | } | 441 | } |
431 | 442 | ||
432 | static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) | 443 | static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) |
433 | { | 444 | { |
434 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 445 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
435 | struct rtattr *opts = NULL; | 446 | struct nlattr *opts = NULL; |
436 | 447 | ||
437 | opts = RTA_NEST(skb, TCA_OPTIONS); | 448 | opts = nla_nest_start(skb, TCA_OPTIONS); |
438 | RTA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); | 449 | if (opts == NULL) |
450 | goto nla_put_failure; | ||
451 | NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); | ||
439 | 452 | ||
440 | if (p->default_index != NO_DEFAULT_INDEX) | 453 | if (p->default_index != NO_DEFAULT_INDEX) |
441 | RTA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); | 454 | NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); |
442 | 455 | ||
443 | if (p->set_tc_index) | 456 | if (p->set_tc_index) |
444 | RTA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); | 457 | NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); |
445 | 458 | ||
446 | return RTA_NEST_END(skb, opts); | 459 | return nla_nest_end(skb, opts); |
447 | 460 | ||
448 | rtattr_failure: | 461 | nla_put_failure: |
449 | return RTA_NEST_CANCEL(skb, opts); | 462 | return nla_nest_cancel(skb, opts); |
450 | } | 463 | } |
451 | 464 | ||
452 | static const struct Qdisc_class_ops dsmark_class_ops = { | 465 | static const struct Qdisc_class_ops dsmark_class_ops = { |
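
[Illustrative aside, not part of the patch] The dsmark hunks above show the parsing pattern this conversion applies everywhere: nested-attribute arrays drop the old "-1" offset and gain "+ 1" in their size, rtattr_parse_nested() becomes nla_parse_nested() (with a NULL policy for now), and the implicit length checks of the RTA_GET_* macros are replaced by explicit nla_len() tests before nla_get_*(). A minimal sketch of that pattern, assuming a hypothetical qdisc with invented TCA_FOO_* attribute names:

#include <net/netlink.h>
#include <net/pkt_sched.h>

enum { TCA_FOO_UNSPEC, TCA_FOO_LIMIT, TCA_FOO_ECN, __TCA_FOO_MAX };
#define TCA_FOO_MAX (__TCA_FOO_MAX - 1)

struct foo_sched_data {
	u32	limit;
	int	ecn;
};

static int foo_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FOO_MAX + 1];	/* slots indexed by attribute type */

	if (opt == NULL || nla_parse_nested(tb, TCA_FOO_MAX, opt, NULL) < 0)
		return -EINVAL;

	if (tb[TCA_FOO_LIMIT]) {
		/* explicit length check replaces the old RTA_GET_U32() */
		if (nla_len(tb[TCA_FOO_LIMIT]) < sizeof(u32))
			return -EINVAL;
		q->limit = nla_get_u32(tb[TCA_FOO_LIMIT]);
	}

	/* flag attributes carry no payload; presence is the value */
	q->ecn = nla_get_flag(tb[TCA_FOO_ECN]);
	return 0;
}
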
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index fd0591903c8a..95ed48221652 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -43,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
43 | return qdisc_reshape_fail(skb, sch); | 43 | return qdisc_reshape_fail(skb, sch); |
44 | } | 44 | } |
45 | 45 | ||
46 | static int fifo_init(struct Qdisc *sch, struct rtattr *opt) | 46 | static int fifo_init(struct Qdisc *sch, struct nlattr *opt) |
47 | { | 47 | { |
48 | struct fifo_sched_data *q = qdisc_priv(sch); | 48 | struct fifo_sched_data *q = qdisc_priv(sch); |
49 | 49 | ||
@@ -55,9 +55,9 @@ static int fifo_init(struct Qdisc *sch, struct rtattr *opt) | |||
55 | 55 | ||
56 | q->limit = limit; | 56 | q->limit = limit; |
57 | } else { | 57 | } else { |
58 | struct tc_fifo_qopt *ctl = RTA_DATA(opt); | 58 | struct tc_fifo_qopt *ctl = nla_data(opt); |
59 | 59 | ||
60 | if (RTA_PAYLOAD(opt) < sizeof(*ctl)) | 60 | if (nla_len(opt) < sizeof(*ctl)) |
61 | return -EINVAL; | 61 | return -EINVAL; |
62 | 62 | ||
63 | q->limit = ctl->limit; | 63 | q->limit = ctl->limit; |
@@ -71,10 +71,10 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
71 | struct fifo_sched_data *q = qdisc_priv(sch); | 71 | struct fifo_sched_data *q = qdisc_priv(sch); |
72 | struct tc_fifo_qopt opt = { .limit = q->limit }; | 72 | struct tc_fifo_qopt opt = { .limit = q->limit }; |
73 | 73 | ||
74 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 74 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
75 | return skb->len; | 75 | return skb->len; |
76 | 76 | ||
77 | rtattr_failure: | 77 | nla_put_failure: |
78 | return -1; | 78 | return -1; |
79 | } | 79 | } |
80 | 80 | ||
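
[Illustrative aside, not part of the patch] Besides the type changes, the only edit to fifo_dump() is the exit label, because NLA_PUT() and its relatives keep the goto-on-failure convention of the old RTA_PUT() family; NLA_PUT(skb, type, len, data) behaves roughly like the explicit form sketched below (helper name invented):

#include <linux/skbuff.h>
#include <net/netlink.h>

/* Roughly what NLA_PUT(skb, attrtype, attrlen, data) expands to. */
static int put_or_fail(struct sk_buff *skb, int attrtype, int attrlen,
		       const void *data)
{
	if (nla_put(skb, attrtype, attrlen, data) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:	/* reached when the skb has no room left */
	return -1;
}
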
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 51e64acd5098..10b5c0887fff 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -397,14 +397,14 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) | |||
397 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; | 397 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; |
398 | 398 | ||
399 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); | 399 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); |
400 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 400 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
401 | return skb->len; | 401 | return skb->len; |
402 | 402 | ||
403 | rtattr_failure: | 403 | nla_put_failure: |
404 | return -1; | 404 | return -1; |
405 | } | 405 | } |
406 | 406 | ||
407 | static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt) | 407 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) |
408 | { | 408 | { |
409 | int prio; | 409 | int prio; |
410 | struct sk_buff_head *list = qdisc_priv(qdisc); | 410 | struct sk_buff_head *list = qdisc_priv(qdisc); |
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index e2bcd6682c70..6b784838a534 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -350,16 +350,16 @@ static inline void gred_destroy_vq(struct gred_sched_data *q) | |||
350 | kfree(q); | 350 | kfree(q); |
351 | } | 351 | } |
352 | 352 | ||
353 | static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps) | 353 | static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) |
354 | { | 354 | { |
355 | struct gred_sched *table = qdisc_priv(sch); | 355 | struct gred_sched *table = qdisc_priv(sch); |
356 | struct tc_gred_sopt *sopt; | 356 | struct tc_gred_sopt *sopt; |
357 | int i; | 357 | int i; |
358 | 358 | ||
359 | if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt)) | 359 | if (dps == NULL || nla_len(dps) < sizeof(*sopt)) |
360 | return -EINVAL; | 360 | return -EINVAL; |
361 | 361 | ||
362 | sopt = RTA_DATA(dps); | 362 | sopt = nla_data(dps); |
363 | 363 | ||
364 | if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs) | 364 | if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs) |
365 | return -EINVAL; | 365 | return -EINVAL; |
@@ -425,28 +425,28 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, | |||
425 | return 0; | 425 | return 0; |
426 | } | 426 | } |
427 | 427 | ||
428 | static int gred_change(struct Qdisc *sch, struct rtattr *opt) | 428 | static int gred_change(struct Qdisc *sch, struct nlattr *opt) |
429 | { | 429 | { |
430 | struct gred_sched *table = qdisc_priv(sch); | 430 | struct gred_sched *table = qdisc_priv(sch); |
431 | struct tc_gred_qopt *ctl; | 431 | struct tc_gred_qopt *ctl; |
432 | struct rtattr *tb[TCA_GRED_MAX]; | 432 | struct nlattr *tb[TCA_GRED_MAX + 1]; |
433 | int err = -EINVAL, prio = GRED_DEF_PRIO; | 433 | int err = -EINVAL, prio = GRED_DEF_PRIO; |
434 | u8 *stab; | 434 | u8 *stab; |
435 | 435 | ||
436 | if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt)) | 436 | if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL)) |
437 | return -EINVAL; | 437 | return -EINVAL; |
438 | 438 | ||
439 | if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL) | 439 | if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) |
440 | return gred_change_table_def(sch, opt); | 440 | return gred_change_table_def(sch, opt); |
441 | 441 | ||
442 | if (tb[TCA_GRED_PARMS-1] == NULL || | 442 | if (tb[TCA_GRED_PARMS] == NULL || |
443 | RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) || | 443 | nla_len(tb[TCA_GRED_PARMS]) < sizeof(*ctl) || |
444 | tb[TCA_GRED_STAB-1] == NULL || | 444 | tb[TCA_GRED_STAB] == NULL || |
445 | RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256) | 445 | nla_len(tb[TCA_GRED_STAB]) < 256) |
446 | return -EINVAL; | 446 | return -EINVAL; |
447 | 447 | ||
448 | ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]); | 448 | ctl = nla_data(tb[TCA_GRED_PARMS]); |
449 | stab = RTA_DATA(tb[TCA_GRED_STAB-1]); | 449 | stab = nla_data(tb[TCA_GRED_STAB]); |
450 | 450 | ||
451 | if (ctl->DP >= table->DPs) | 451 | if (ctl->DP >= table->DPs) |
452 | goto errout; | 452 | goto errout; |
@@ -486,23 +486,23 @@ errout: | |||
486 | return err; | 486 | return err; |
487 | } | 487 | } |
488 | 488 | ||
489 | static int gred_init(struct Qdisc *sch, struct rtattr *opt) | 489 | static int gred_init(struct Qdisc *sch, struct nlattr *opt) |
490 | { | 490 | { |
491 | struct rtattr *tb[TCA_GRED_MAX]; | 491 | struct nlattr *tb[TCA_GRED_MAX + 1]; |
492 | 492 | ||
493 | if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt)) | 493 | if (opt == NULL || nla_parse_nested(tb, TCA_GRED_MAX, opt, NULL)) |
494 | return -EINVAL; | 494 | return -EINVAL; |
495 | 495 | ||
496 | if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1]) | 496 | if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) |
497 | return -EINVAL; | 497 | return -EINVAL; |
498 | 498 | ||
499 | return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]); | 499 | return gred_change_table_def(sch, tb[TCA_GRED_DPS]); |
500 | } | 500 | } |
501 | 501 | ||
502 | static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | 502 | static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) |
503 | { | 503 | { |
504 | struct gred_sched *table = qdisc_priv(sch); | 504 | struct gred_sched *table = qdisc_priv(sch); |
505 | struct rtattr *parms, *opts = NULL; | 505 | struct nlattr *parms, *opts = NULL; |
506 | int i; | 506 | int i; |
507 | struct tc_gred_sopt sopt = { | 507 | struct tc_gred_sopt sopt = { |
508 | .DPs = table->DPs, | 508 | .DPs = table->DPs, |
@@ -511,9 +511,13 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
511 | .flags = table->red_flags, | 511 | .flags = table->red_flags, |
512 | }; | 512 | }; |
513 | 513 | ||
514 | opts = RTA_NEST(skb, TCA_OPTIONS); | 514 | opts = nla_nest_start(skb, TCA_OPTIONS); |
515 | RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); | 515 | if (opts == NULL) |
516 | parms = RTA_NEST(skb, TCA_GRED_PARMS); | 516 | goto nla_put_failure; |
517 | NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); | ||
518 | parms = nla_nest_start(skb, TCA_GRED_PARMS); | ||
519 | if (parms == NULL) | ||
520 | goto nla_put_failure; | ||
517 | 521 | ||
518 | for (i = 0; i < MAX_DPs; i++) { | 522 | for (i = 0; i < MAX_DPs; i++) { |
519 | struct gred_sched_data *q = table->tab[i]; | 523 | struct gred_sched_data *q = table->tab[i]; |
@@ -555,15 +559,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
555 | opt.qave = red_calc_qavg(&q->parms, q->parms.qavg); | 559 | opt.qave = red_calc_qavg(&q->parms, q->parms.qavg); |
556 | 560 | ||
557 | append_opt: | 561 | append_opt: |
558 | RTA_APPEND(skb, sizeof(opt), &opt); | 562 | if (nla_append(skb, sizeof(opt), &opt) < 0) |
563 | goto nla_put_failure; | ||
559 | } | 564 | } |
560 | 565 | ||
561 | RTA_NEST_END(skb, parms); | 566 | nla_nest_end(skb, parms); |
562 | 567 | ||
563 | return RTA_NEST_END(skb, opts); | 568 | return nla_nest_end(skb, opts); |
564 | 569 | ||
565 | rtattr_failure: | 570 | nla_put_failure: |
566 | return RTA_NEST_CANCEL(skb, opts); | 571 | return nla_nest_cancel(skb, opts); |
567 | } | 572 | } |
568 | 573 | ||
569 | static void gred_destroy(struct Qdisc *sch) | 574 | static void gred_destroy(struct Qdisc *sch) |
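
[Illustrative aside, not part of the patch] The gred dump conversion above shows why the nested dump paths grow NULL checks: RTA_NEST() jumped to the failure label itself, whereas nla_nest_start() signals lack of skb space by returning NULL. The converted shape, reduced to its essentials (sub-attribute type and payload are placeholders):

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define TCA_FOO_PARMS 1		/* hypothetical nested attribute type */

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *opts;
	struct tc_fifo_qopt parms = { .limit = 16 };	/* placeholder payload */

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)		/* no room left in the skb */
		goto nla_put_failure;

	NLA_PUT(skb, TCA_FOO_PARMS, sizeof(parms), &parms);

	return nla_nest_end(skb, opts);	/* closes the nest, returns skb->len */

nla_put_failure:
	return nla_nest_cancel(skb, opts);	/* trims back to the nest start; NULL is safe */
}
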
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 69dc3bccf024..4e6a164d3058 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -988,39 +988,39 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc, | |||
988 | 988 | ||
989 | static int | 989 | static int |
990 | hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | 990 | hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, |
991 | struct rtattr **tca, unsigned long *arg) | 991 | struct nlattr **tca, unsigned long *arg) |
992 | { | 992 | { |
993 | struct hfsc_sched *q = qdisc_priv(sch); | 993 | struct hfsc_sched *q = qdisc_priv(sch); |
994 | struct hfsc_class *cl = (struct hfsc_class *)*arg; | 994 | struct hfsc_class *cl = (struct hfsc_class *)*arg; |
995 | struct hfsc_class *parent = NULL; | 995 | struct hfsc_class *parent = NULL; |
996 | struct rtattr *opt = tca[TCA_OPTIONS-1]; | 996 | struct nlattr *opt = tca[TCA_OPTIONS]; |
997 | struct rtattr *tb[TCA_HFSC_MAX]; | 997 | struct nlattr *tb[TCA_HFSC_MAX + 1]; |
998 | struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL; | 998 | struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL; |
999 | u64 cur_time; | 999 | u64 cur_time; |
1000 | 1000 | ||
1001 | if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt)) | 1001 | if (opt == NULL || nla_parse_nested(tb, TCA_HFSC_MAX, opt, NULL)) |
1002 | return -EINVAL; | 1002 | return -EINVAL; |
1003 | 1003 | ||
1004 | if (tb[TCA_HFSC_RSC-1]) { | 1004 | if (tb[TCA_HFSC_RSC]) { |
1005 | if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc)) | 1005 | if (nla_len(tb[TCA_HFSC_RSC]) < sizeof(*rsc)) |
1006 | return -EINVAL; | 1006 | return -EINVAL; |
1007 | rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]); | 1007 | rsc = nla_data(tb[TCA_HFSC_RSC]); |
1008 | if (rsc->m1 == 0 && rsc->m2 == 0) | 1008 | if (rsc->m1 == 0 && rsc->m2 == 0) |
1009 | rsc = NULL; | 1009 | rsc = NULL; |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | if (tb[TCA_HFSC_FSC-1]) { | 1012 | if (tb[TCA_HFSC_FSC]) { |
1013 | if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc)) | 1013 | if (nla_len(tb[TCA_HFSC_FSC]) < sizeof(*fsc)) |
1014 | return -EINVAL; | 1014 | return -EINVAL; |
1015 | fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]); | 1015 | fsc = nla_data(tb[TCA_HFSC_FSC]); |
1016 | if (fsc->m1 == 0 && fsc->m2 == 0) | 1016 | if (fsc->m1 == 0 && fsc->m2 == 0) |
1017 | fsc = NULL; | 1017 | fsc = NULL; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | if (tb[TCA_HFSC_USC-1]) { | 1020 | if (tb[TCA_HFSC_USC]) { |
1021 | if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc)) | 1021 | if (nla_len(tb[TCA_HFSC_USC]) < sizeof(*usc)) |
1022 | return -EINVAL; | 1022 | return -EINVAL; |
1023 | usc = RTA_DATA(tb[TCA_HFSC_USC-1]); | 1023 | usc = nla_data(tb[TCA_HFSC_USC]); |
1024 | if (usc->m1 == 0 && usc->m2 == 0) | 1024 | if (usc->m1 == 0 && usc->m2 == 0) |
1025 | usc = NULL; | 1025 | usc = NULL; |
1026 | } | 1026 | } |
@@ -1050,10 +1050,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1050 | } | 1050 | } |
1051 | sch_tree_unlock(sch); | 1051 | sch_tree_unlock(sch); |
1052 | 1052 | ||
1053 | if (tca[TCA_RATE-1]) | 1053 | if (tca[TCA_RATE]) |
1054 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1054 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1055 | &sch->dev->queue_lock, | 1055 | &sch->dev->queue_lock, |
1056 | tca[TCA_RATE-1]); | 1056 | tca[TCA_RATE]); |
1057 | return 0; | 1057 | return 0; |
1058 | } | 1058 | } |
1059 | 1059 | ||
@@ -1106,9 +1106,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1106 | cl->cl_pcvtoff = parent->cl_cvtoff; | 1106 | cl->cl_pcvtoff = parent->cl_cvtoff; |
1107 | sch_tree_unlock(sch); | 1107 | sch_tree_unlock(sch); |
1108 | 1108 | ||
1109 | if (tca[TCA_RATE-1]) | 1109 | if (tca[TCA_RATE]) |
1110 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1110 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1111 | &sch->dev->queue_lock, tca[TCA_RATE-1]); | 1111 | &sch->dev->queue_lock, tca[TCA_RATE]); |
1112 | *arg = (unsigned long)cl; | 1112 | *arg = (unsigned long)cl; |
1113 | return 0; | 1113 | return 0; |
1114 | } | 1114 | } |
@@ -1304,11 +1304,11 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) | |||
1304 | tsc.m1 = sm2m(sc->sm1); | 1304 | tsc.m1 = sm2m(sc->sm1); |
1305 | tsc.d = dx2d(sc->dx); | 1305 | tsc.d = dx2d(sc->dx); |
1306 | tsc.m2 = sm2m(sc->sm2); | 1306 | tsc.m2 = sm2m(sc->sm2); |
1307 | RTA_PUT(skb, attr, sizeof(tsc), &tsc); | 1307 | NLA_PUT(skb, attr, sizeof(tsc), &tsc); |
1308 | 1308 | ||
1309 | return skb->len; | 1309 | return skb->len; |
1310 | 1310 | ||
1311 | rtattr_failure: | 1311 | nla_put_failure: |
1312 | return -1; | 1312 | return -1; |
1313 | } | 1313 | } |
1314 | 1314 | ||
@@ -1317,19 +1317,19 @@ hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) | |||
1317 | { | 1317 | { |
1318 | if ((cl->cl_flags & HFSC_RSC) && | 1318 | if ((cl->cl_flags & HFSC_RSC) && |
1319 | (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0)) | 1319 | (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0)) |
1320 | goto rtattr_failure; | 1320 | goto nla_put_failure; |
1321 | 1321 | ||
1322 | if ((cl->cl_flags & HFSC_FSC) && | 1322 | if ((cl->cl_flags & HFSC_FSC) && |
1323 | (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0)) | 1323 | (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0)) |
1324 | goto rtattr_failure; | 1324 | goto nla_put_failure; |
1325 | 1325 | ||
1326 | if ((cl->cl_flags & HFSC_USC) && | 1326 | if ((cl->cl_flags & HFSC_USC) && |
1327 | (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0)) | 1327 | (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0)) |
1328 | goto rtattr_failure; | 1328 | goto nla_put_failure; |
1329 | 1329 | ||
1330 | return skb->len; | 1330 | return skb->len; |
1331 | 1331 | ||
1332 | rtattr_failure: | 1332 | nla_put_failure: |
1333 | return -1; | 1333 | return -1; |
1334 | } | 1334 | } |
1335 | 1335 | ||
@@ -1339,20 +1339,20 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, | |||
1339 | { | 1339 | { |
1340 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1340 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1341 | unsigned char *b = skb_tail_pointer(skb); | 1341 | unsigned char *b = skb_tail_pointer(skb); |
1342 | struct rtattr *rta = (struct rtattr *)b; | 1342 | struct nlattr *nla = (struct nlattr *)b; |
1343 | 1343 | ||
1344 | tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; | 1344 | tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; |
1345 | tcm->tcm_handle = cl->classid; | 1345 | tcm->tcm_handle = cl->classid; |
1346 | if (cl->level == 0) | 1346 | if (cl->level == 0) |
1347 | tcm->tcm_info = cl->qdisc->handle; | 1347 | tcm->tcm_info = cl->qdisc->handle; |
1348 | 1348 | ||
1349 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 1349 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
1350 | if (hfsc_dump_curves(skb, cl) < 0) | 1350 | if (hfsc_dump_curves(skb, cl) < 0) |
1351 | goto rtattr_failure; | 1351 | goto nla_put_failure; |
1352 | rta->rta_len = skb_tail_pointer(skb) - b; | 1352 | nla->nla_len = skb_tail_pointer(skb) - b; |
1353 | return skb->len; | 1353 | return skb->len; |
1354 | 1354 | ||
1355 | rtattr_failure: | 1355 | nla_put_failure: |
1356 | nlmsg_trim(skb, b); | 1356 | nlmsg_trim(skb, b); |
1357 | return -1; | 1357 | return -1; |
1358 | } | 1358 | } |
@@ -1423,15 +1423,15 @@ hfsc_schedule_watchdog(struct Qdisc *sch) | |||
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | static int | 1425 | static int |
1426 | hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt) | 1426 | hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) |
1427 | { | 1427 | { |
1428 | struct hfsc_sched *q = qdisc_priv(sch); | 1428 | struct hfsc_sched *q = qdisc_priv(sch); |
1429 | struct tc_hfsc_qopt *qopt; | 1429 | struct tc_hfsc_qopt *qopt; |
1430 | unsigned int i; | 1430 | unsigned int i; |
1431 | 1431 | ||
1432 | if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) | 1432 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) |
1433 | return -EINVAL; | 1433 | return -EINVAL; |
1434 | qopt = RTA_DATA(opt); | 1434 | qopt = nla_data(opt); |
1435 | 1435 | ||
1436 | q->defcls = qopt->defcls; | 1436 | q->defcls = qopt->defcls; |
1437 | for (i = 0; i < HFSC_HSIZE; i++) | 1437 | for (i = 0; i < HFSC_HSIZE; i++) |
@@ -1459,14 +1459,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt) | |||
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | static int | 1461 | static int |
1462 | hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt) | 1462 | hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt) |
1463 | { | 1463 | { |
1464 | struct hfsc_sched *q = qdisc_priv(sch); | 1464 | struct hfsc_sched *q = qdisc_priv(sch); |
1465 | struct tc_hfsc_qopt *qopt; | 1465 | struct tc_hfsc_qopt *qopt; |
1466 | 1466 | ||
1467 | if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) | 1467 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) |
1468 | return -EINVAL; | 1468 | return -EINVAL; |
1469 | qopt = RTA_DATA(opt); | 1469 | qopt = nla_data(opt); |
1470 | 1470 | ||
1471 | sch_tree_lock(sch); | 1471 | sch_tree_lock(sch); |
1472 | q->defcls = qopt->defcls; | 1472 | q->defcls = qopt->defcls; |
@@ -1550,10 +1550,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
1550 | struct tc_hfsc_qopt qopt; | 1550 | struct tc_hfsc_qopt qopt; |
1551 | 1551 | ||
1552 | qopt.defcls = q->defcls; | 1552 | qopt.defcls = q->defcls; |
1553 | RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); | 1553 | NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); |
1554 | return skb->len; | 1554 | return skb->len; |
1555 | 1555 | ||
1556 | rtattr_failure: | 1556 | nla_put_failure: |
1557 | nlmsg_trim(skb, b); | 1557 | nlmsg_trim(skb, b); |
1558 | return -1; | 1558 | return -1; |
1559 | } | 1559 | } |
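
[Illustrative aside, not part of the patch] hfsc_dump_class() above (like htb_dump() below) keeps the older idiom of opening TCA_OPTIONS as a zero-length attribute and patching its length once the sub-attributes are in place; only the rtattr types and labels change. The idiom in isolation (sub-attribute type and value are placeholders):

#include <net/netlink.h>
#include <net/pkt_sched.h>

static int foo_dump_class_opts(struct Qdisc *sch, struct sk_buff *skb)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *)b;	/* becomes the TCA_OPTIONS header */

	/* emit an empty TCA_OPTIONS header, then append sub-attributes behind it */
	NLA_PUT(skb, TCA_OPTIONS, 0, NULL);
	NLA_PUT_U32(skb, 1 /* placeholder type */, 0 /* placeholder value */);

	/* patch the container length to cover everything written since 'b' */
	nla->nla_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);	/* undo any partial write */
	return -1;
}
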
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 6a2352cd9c2e..3b3ff641b6d7 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -992,19 +992,19 @@ static void htb_reset(struct Qdisc *sch) | |||
992 | INIT_LIST_HEAD(q->drops + i); | 992 | INIT_LIST_HEAD(q->drops + i); |
993 | } | 993 | } |
994 | 994 | ||
995 | static int htb_init(struct Qdisc *sch, struct rtattr *opt) | 995 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) |
996 | { | 996 | { |
997 | struct htb_sched *q = qdisc_priv(sch); | 997 | struct htb_sched *q = qdisc_priv(sch); |
998 | struct rtattr *tb[TCA_HTB_INIT]; | 998 | struct nlattr *tb[TCA_HTB_INIT + 1]; |
999 | struct tc_htb_glob *gopt; | 999 | struct tc_htb_glob *gopt; |
1000 | int i; | 1000 | int i; |
1001 | if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) || | 1001 | if (!opt || nla_parse_nested(tb, TCA_HTB_INIT, opt, NULL) || |
1002 | tb[TCA_HTB_INIT - 1] == NULL || | 1002 | tb[TCA_HTB_INIT] == NULL || |
1003 | RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) { | 1003 | nla_len(tb[TCA_HTB_INIT]) < sizeof(*gopt)) { |
1004 | printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); | 1004 | printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); |
1005 | return -EINVAL; | 1005 | return -EINVAL; |
1006 | } | 1006 | } |
1007 | gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]); | 1007 | gopt = nla_data(tb[TCA_HTB_INIT]); |
1008 | if (gopt->version != HTB_VER >> 16) { | 1008 | if (gopt->version != HTB_VER >> 16) { |
1009 | printk(KERN_ERR | 1009 | printk(KERN_ERR |
1010 | "HTB: need tc/htb version %d (minor is %d), you have %d\n", | 1010 | "HTB: need tc/htb version %d (minor is %d), you have %d\n", |
@@ -1036,7 +1036,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
1036 | { | 1036 | { |
1037 | struct htb_sched *q = qdisc_priv(sch); | 1037 | struct htb_sched *q = qdisc_priv(sch); |
1038 | unsigned char *b = skb_tail_pointer(skb); | 1038 | unsigned char *b = skb_tail_pointer(skb); |
1039 | struct rtattr *rta; | 1039 | struct nlattr *nla; |
1040 | struct tc_htb_glob gopt; | 1040 | struct tc_htb_glob gopt; |
1041 | spin_lock_bh(&sch->dev->queue_lock); | 1041 | spin_lock_bh(&sch->dev->queue_lock); |
1042 | gopt.direct_pkts = q->direct_pkts; | 1042 | gopt.direct_pkts = q->direct_pkts; |
@@ -1045,13 +1045,13 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
1045 | gopt.rate2quantum = q->rate2quantum; | 1045 | gopt.rate2quantum = q->rate2quantum; |
1046 | gopt.defcls = q->defcls; | 1046 | gopt.defcls = q->defcls; |
1047 | gopt.debug = 0; | 1047 | gopt.debug = 0; |
1048 | rta = (struct rtattr *)b; | 1048 | nla = (struct nlattr *)b; |
1049 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 1049 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
1050 | RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); | 1050 | NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); |
1051 | rta->rta_len = skb_tail_pointer(skb) - b; | 1051 | nla->nla_len = skb_tail_pointer(skb) - b; |
1052 | spin_unlock_bh(&sch->dev->queue_lock); | 1052 | spin_unlock_bh(&sch->dev->queue_lock); |
1053 | return skb->len; | 1053 | return skb->len; |
1054 | rtattr_failure: | 1054 | nla_put_failure: |
1055 | spin_unlock_bh(&sch->dev->queue_lock); | 1055 | spin_unlock_bh(&sch->dev->queue_lock); |
1056 | nlmsg_trim(skb, skb_tail_pointer(skb)); | 1056 | nlmsg_trim(skb, skb_tail_pointer(skb)); |
1057 | return -1; | 1057 | return -1; |
@@ -1062,7 +1062,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1062 | { | 1062 | { |
1063 | struct htb_class *cl = (struct htb_class *)arg; | 1063 | struct htb_class *cl = (struct htb_class *)arg; |
1064 | unsigned char *b = skb_tail_pointer(skb); | 1064 | unsigned char *b = skb_tail_pointer(skb); |
1065 | struct rtattr *rta; | 1065 | struct nlattr *nla; |
1066 | struct tc_htb_opt opt; | 1066 | struct tc_htb_opt opt; |
1067 | 1067 | ||
1068 | spin_lock_bh(&sch->dev->queue_lock); | 1068 | spin_lock_bh(&sch->dev->queue_lock); |
@@ -1071,8 +1071,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1071 | if (!cl->level && cl->un.leaf.q) | 1071 | if (!cl->level && cl->un.leaf.q) |
1072 | tcm->tcm_info = cl->un.leaf.q->handle; | 1072 | tcm->tcm_info = cl->un.leaf.q->handle; |
1073 | 1073 | ||
1074 | rta = (struct rtattr *)b; | 1074 | nla = (struct nlattr *)b; |
1075 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 1075 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
1076 | 1076 | ||
1077 | memset(&opt, 0, sizeof(opt)); | 1077 | memset(&opt, 0, sizeof(opt)); |
1078 | 1078 | ||
@@ -1083,11 +1083,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1083 | opt.quantum = cl->un.leaf.quantum; | 1083 | opt.quantum = cl->un.leaf.quantum; |
1084 | opt.prio = cl->un.leaf.prio; | 1084 | opt.prio = cl->un.leaf.prio; |
1085 | opt.level = cl->level; | 1085 | opt.level = cl->level; |
1086 | RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); | 1086 | NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); |
1087 | rta->rta_len = skb_tail_pointer(skb) - b; | 1087 | nla->nla_len = skb_tail_pointer(skb) - b; |
1088 | spin_unlock_bh(&sch->dev->queue_lock); | 1088 | spin_unlock_bh(&sch->dev->queue_lock); |
1089 | return skb->len; | 1089 | return skb->len; |
1090 | rtattr_failure: | 1090 | nla_put_failure: |
1091 | spin_unlock_bh(&sch->dev->queue_lock); | 1091 | spin_unlock_bh(&sch->dev->queue_lock); |
1092 | nlmsg_trim(skb, b); | 1092 | nlmsg_trim(skb, b); |
1093 | return -1; | 1093 | return -1; |
@@ -1290,29 +1290,29 @@ static void htb_put(struct Qdisc *sch, unsigned long arg) | |||
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | static int htb_change_class(struct Qdisc *sch, u32 classid, | 1292 | static int htb_change_class(struct Qdisc *sch, u32 classid, |
1293 | u32 parentid, struct rtattr **tca, | 1293 | u32 parentid, struct nlattr **tca, |
1294 | unsigned long *arg) | 1294 | unsigned long *arg) |
1295 | { | 1295 | { |
1296 | int err = -EINVAL; | 1296 | int err = -EINVAL; |
1297 | struct htb_sched *q = qdisc_priv(sch); | 1297 | struct htb_sched *q = qdisc_priv(sch); |
1298 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 1298 | struct htb_class *cl = (struct htb_class *)*arg, *parent; |
1299 | struct rtattr *opt = tca[TCA_OPTIONS - 1]; | 1299 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1300 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | 1300 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; |
1301 | struct rtattr *tb[TCA_HTB_RTAB]; | 1301 | struct nlattr *tb[TCA_HTB_RTAB + 1]; |
1302 | struct tc_htb_opt *hopt; | 1302 | struct tc_htb_opt *hopt; |
1303 | 1303 | ||
1304 | /* extract all subattrs from opt attr */ | 1304 | /* extract all subattrs from opt attr */ |
1305 | if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) || | 1305 | if (!opt || nla_parse_nested(tb, TCA_HTB_RTAB, opt, NULL) || |
1306 | tb[TCA_HTB_PARMS - 1] == NULL || | 1306 | tb[TCA_HTB_PARMS] == NULL || |
1307 | RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt)) | 1307 | nla_len(tb[TCA_HTB_PARMS]) < sizeof(*hopt)) |
1308 | goto failure; | 1308 | goto failure; |
1309 | 1309 | ||
1310 | parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); | 1310 | parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); |
1311 | 1311 | ||
1312 | hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]); | 1312 | hopt = nla_data(tb[TCA_HTB_PARMS]); |
1313 | 1313 | ||
1314 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]); | 1314 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); |
1315 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]); | 1315 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); |
1316 | if (!rtab || !ctab) | 1316 | if (!rtab || !ctab) |
1317 | goto failure; | 1317 | goto failure; |
1318 | 1318 | ||
@@ -1320,12 +1320,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1320 | struct Qdisc *new_q; | 1320 | struct Qdisc *new_q; |
1321 | int prio; | 1321 | int prio; |
1322 | struct { | 1322 | struct { |
1323 | struct rtattr rta; | 1323 | struct nlattr nla; |
1324 | struct gnet_estimator opt; | 1324 | struct gnet_estimator opt; |
1325 | } est = { | 1325 | } est = { |
1326 | .rta = { | 1326 | .nla = { |
1327 | .rta_len = RTA_LENGTH(sizeof(est.opt)), | 1327 | .nla_len = nla_attr_size(sizeof(est.opt)), |
1328 | .rta_type = TCA_RATE, | 1328 | .nla_type = TCA_RATE, |
1329 | }, | 1329 | }, |
1330 | .opt = { | 1330 | .opt = { |
1331 | /* 4s interval, 16s averaging constant */ | 1331 | /* 4s interval, 16s averaging constant */ |
@@ -1350,7 +1350,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1350 | 1350 | ||
1351 | gen_new_estimator(&cl->bstats, &cl->rate_est, | 1351 | gen_new_estimator(&cl->bstats, &cl->rate_est, |
1352 | &sch->dev->queue_lock, | 1352 | &sch->dev->queue_lock, |
1353 | tca[TCA_RATE-1] ? : &est.rta); | 1353 | tca[TCA_RATE] ? : &est.nla); |
1354 | cl->refcnt = 1; | 1354 | cl->refcnt = 1; |
1355 | INIT_LIST_HEAD(&cl->sibling); | 1355 | INIT_LIST_HEAD(&cl->sibling); |
1356 | INIT_HLIST_NODE(&cl->hlist); | 1356 | INIT_HLIST_NODE(&cl->hlist); |
@@ -1403,10 +1403,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1403 | list_add_tail(&cl->sibling, | 1403 | list_add_tail(&cl->sibling, |
1404 | parent ? &parent->children : &q->root); | 1404 | parent ? &parent->children : &q->root); |
1405 | } else { | 1405 | } else { |
1406 | if (tca[TCA_RATE-1]) | 1406 | if (tca[TCA_RATE]) |
1407 | gen_replace_estimator(&cl->bstats, &cl->rate_est, | 1407 | gen_replace_estimator(&cl->bstats, &cl->rate_est, |
1408 | &sch->dev->queue_lock, | 1408 | &sch->dev->queue_lock, |
1409 | tca[TCA_RATE-1]); | 1409 | tca[TCA_RATE]); |
1410 | sch_tree_lock(sch); | 1410 | sch_tree_lock(sch); |
1411 | } | 1411 | } |
1412 | 1412 | ||
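
[Illustrative aside, not part of the patch] The stack-built default estimator in htb_change_class() is the one spot where an nlattr header is constructed by hand rather than parsed or dumped; RTA_LENGTH() becomes nla_attr_size() and the rta_* fields become nla_*. The construction in isolation:

#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/gen_stats.h>

/* Default TCA_RATE attribute built on the stack, as in htb_change_class(). */
static void build_default_rate_est(void)
{
	struct {
		struct nlattr		nla;
		struct gnet_estimator	opt;
	} est = {
		.nla = {
			.nla_len	= nla_attr_size(sizeof(est.opt)),
			.nla_type	= TCA_RATE,
		},
		.opt = {
			/* 4s interval, 16s averaging constant */
			.interval	= 2,
			.ewma_log	= 2,
		},
	};

	/* &est.nla can now stand in for a userspace-supplied TCA_RATE
	 * attribute when calling gen_new_estimator(). */
	(void)est;
}
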
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 72525710b66f..f6decbb56645 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -57,7 +57,7 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent, | 59 | static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent, |
60 | struct rtattr **tca, unsigned long *arg) | 60 | struct nlattr **tca, unsigned long *arg) |
61 | { | 61 | { |
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
@@ -156,7 +156,7 @@ static struct nf_hook_ops ing_ops[] __read_mostly = { | |||
156 | }; | 156 | }; |
157 | #endif | 157 | #endif |
158 | 158 | ||
159 | static int ingress_init(struct Qdisc *sch, struct rtattr *opt) | 159 | static int ingress_init(struct Qdisc *sch, struct nlattr *opt) |
160 | { | 160 | { |
161 | #if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER) | 161 | #if !defined(CONFIG_NET_CLS_ACT) && defined(CONFIG_NETFILTER) |
162 | printk("Ingress scheduler: Classifier actions prefered over netfilter\n"); | 162 | printk("Ingress scheduler: Classifier actions prefered over netfilter\n"); |
@@ -184,14 +184,14 @@ static void ingress_destroy(struct Qdisc *sch) | |||
184 | static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) | 184 | static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) |
185 | { | 185 | { |
186 | unsigned char *b = skb_tail_pointer(skb); | 186 | unsigned char *b = skb_tail_pointer(skb); |
187 | struct rtattr *rta; | 187 | struct nlattr *nla; |
188 | 188 | ||
189 | rta = (struct rtattr *)b; | 189 | nla = (struct nlattr *)b; |
190 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 190 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
191 | rta->rta_len = skb_tail_pointer(skb) - b; | 191 | nla->nla_len = skb_tail_pointer(skb) - b; |
192 | return skb->len; | 192 | return skb->len; |
193 | 193 | ||
194 | rtattr_failure: | 194 | nla_put_failure: |
195 | nlmsg_trim(skb, b); | 195 | nlmsg_trim(skb, b); |
196 | return -1; | 196 | return -1; |
197 | } | 197 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 6c344ade33c2..a7b58df4546d 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -313,21 +313,21 @@ static void netem_reset(struct Qdisc *sch) | |||
313 | /* Pass size change message down to embedded FIFO */ | 313 | /* Pass size change message down to embedded FIFO */ |
314 | static int set_fifo_limit(struct Qdisc *q, int limit) | 314 | static int set_fifo_limit(struct Qdisc *q, int limit) |
315 | { | 315 | { |
316 | struct rtattr *rta; | 316 | struct nlattr *nla; |
317 | int ret = -ENOMEM; | 317 | int ret = -ENOMEM; |
318 | 318 | ||
319 | /* Hack to avoid sending change message to non-FIFO */ | 319 | /* Hack to avoid sending change message to non-FIFO */ |
320 | if (strncmp(q->ops->id + 1, "fifo", 4) != 0) | 320 | if (strncmp(q->ops->id + 1, "fifo", 4) != 0) |
321 | return 0; | 321 | return 0; |
322 | 322 | ||
323 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | 323 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); |
324 | if (rta) { | 324 | if (nla) { |
325 | rta->rta_type = RTM_NEWQDISC; | 325 | nla->nla_type = RTM_NEWQDISC; |
326 | rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); | 326 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); |
327 | ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; | 327 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; |
328 | 328 | ||
329 | ret = q->ops->change(q, rta); | 329 | ret = q->ops->change(q, nla); |
330 | kfree(rta); | 330 | kfree(nla); |
331 | } | 331 | } |
332 | return ret; | 332 | return ret; |
333 | } | 333 | } |
@@ -336,11 +336,11 @@ static int set_fifo_limit(struct Qdisc *q, int limit) | |||
336 | * Distribution data is a variable size payload containing | 336 | * Distribution data is a variable size payload containing |
337 | * signed 16 bit values. | 337 | * signed 16 bit values. |
338 | */ | 338 | */ |
339 | static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr) | 339 | static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) |
340 | { | 340 | { |
341 | struct netem_sched_data *q = qdisc_priv(sch); | 341 | struct netem_sched_data *q = qdisc_priv(sch); |
342 | unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16); | 342 | unsigned long n = nla_len(attr)/sizeof(__s16); |
343 | const __s16 *data = RTA_DATA(attr); | 343 | const __s16 *data = nla_data(attr); |
344 | struct disttable *d; | 344 | struct disttable *d; |
345 | int i; | 345 | int i; |
346 | 346 | ||
@@ -363,12 +363,12 @@ static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr) | |||
363 | return 0; | 363 | return 0; |
364 | } | 364 | } |
365 | 365 | ||
366 | static int get_correlation(struct Qdisc *sch, const struct rtattr *attr) | 366 | static int get_correlation(struct Qdisc *sch, const struct nlattr *attr) |
367 | { | 367 | { |
368 | struct netem_sched_data *q = qdisc_priv(sch); | 368 | struct netem_sched_data *q = qdisc_priv(sch); |
369 | const struct tc_netem_corr *c = RTA_DATA(attr); | 369 | const struct tc_netem_corr *c = nla_data(attr); |
370 | 370 | ||
371 | if (RTA_PAYLOAD(attr) != sizeof(*c)) | 371 | if (nla_len(attr) != sizeof(*c)) |
372 | return -EINVAL; | 372 | return -EINVAL; |
373 | 373 | ||
374 | init_crandom(&q->delay_cor, c->delay_corr); | 374 | init_crandom(&q->delay_cor, c->delay_corr); |
@@ -377,12 +377,12 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr) | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | static int get_reorder(struct Qdisc *sch, const struct rtattr *attr) | 380 | static int get_reorder(struct Qdisc *sch, const struct nlattr *attr) |
381 | { | 381 | { |
382 | struct netem_sched_data *q = qdisc_priv(sch); | 382 | struct netem_sched_data *q = qdisc_priv(sch); |
383 | const struct tc_netem_reorder *r = RTA_DATA(attr); | 383 | const struct tc_netem_reorder *r = nla_data(attr); |
384 | 384 | ||
385 | if (RTA_PAYLOAD(attr) != sizeof(*r)) | 385 | if (nla_len(attr) != sizeof(*r)) |
386 | return -EINVAL; | 386 | return -EINVAL; |
387 | 387 | ||
388 | q->reorder = r->probability; | 388 | q->reorder = r->probability; |
@@ -390,12 +390,12 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr) | |||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
392 | 392 | ||
393 | static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr) | 393 | static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr) |
394 | { | 394 | { |
395 | struct netem_sched_data *q = qdisc_priv(sch); | 395 | struct netem_sched_data *q = qdisc_priv(sch); |
396 | const struct tc_netem_corrupt *r = RTA_DATA(attr); | 396 | const struct tc_netem_corrupt *r = nla_data(attr); |
397 | 397 | ||
398 | if (RTA_PAYLOAD(attr) != sizeof(*r)) | 398 | if (nla_len(attr) != sizeof(*r)) |
399 | return -EINVAL; | 399 | return -EINVAL; |
400 | 400 | ||
401 | q->corrupt = r->probability; | 401 | q->corrupt = r->probability; |
@@ -404,16 +404,16 @@ static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr) | |||
404 | } | 404 | } |
405 | 405 | ||
406 | /* Parse netlink message to set options */ | 406 | /* Parse netlink message to set options */ |
407 | static int netem_change(struct Qdisc *sch, struct rtattr *opt) | 407 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) |
408 | { | 408 | { |
409 | struct netem_sched_data *q = qdisc_priv(sch); | 409 | struct netem_sched_data *q = qdisc_priv(sch); |
410 | struct tc_netem_qopt *qopt; | 410 | struct tc_netem_qopt *qopt; |
411 | int ret; | 411 | int ret; |
412 | 412 | ||
413 | if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) | 413 | if (opt == NULL || nla_len(opt) < sizeof(*qopt)) |
414 | return -EINVAL; | 414 | return -EINVAL; |
415 | 415 | ||
416 | qopt = RTA_DATA(opt); | 416 | qopt = nla_data(opt); |
417 | ret = set_fifo_limit(q->qdisc, qopt->limit); | 417 | ret = set_fifo_limit(q->qdisc, qopt->limit); |
418 | if (ret) { | 418 | if (ret) { |
419 | pr_debug("netem: can't set fifo limit\n"); | 419 | pr_debug("netem: can't set fifo limit\n"); |
@@ -437,33 +437,33 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt) | |||
437 | /* Handle nested options after initial queue options. | 437 | /* Handle nested options after initial queue options. |
438 | * Should have put all options in nested format but too late now. | 438 | * Should have put all options in nested format but too late now. |
439 | */ | 439 | */ |
440 | if (RTA_PAYLOAD(opt) > sizeof(*qopt)) { | 440 | if (nla_len(opt) > sizeof(*qopt)) { |
441 | struct rtattr *tb[TCA_NETEM_MAX]; | 441 | struct nlattr *tb[TCA_NETEM_MAX + 1]; |
442 | if (rtattr_parse(tb, TCA_NETEM_MAX, | 442 | if (nla_parse(tb, TCA_NETEM_MAX, |
443 | RTA_DATA(opt) + sizeof(*qopt), | 443 | nla_data(opt) + sizeof(*qopt), |
444 | RTA_PAYLOAD(opt) - sizeof(*qopt))) | 444 | nla_len(opt) - sizeof(*qopt), NULL)) |
445 | return -EINVAL; | 445 | return -EINVAL; |
446 | 446 | ||
447 | if (tb[TCA_NETEM_CORR-1]) { | 447 | if (tb[TCA_NETEM_CORR]) { |
448 | ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]); | 448 | ret = get_correlation(sch, tb[TCA_NETEM_CORR]); |
449 | if (ret) | 449 | if (ret) |
450 | return ret; | 450 | return ret; |
451 | } | 451 | } |
452 | 452 | ||
453 | if (tb[TCA_NETEM_DELAY_DIST-1]) { | 453 | if (tb[TCA_NETEM_DELAY_DIST]) { |
454 | ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]); | 454 | ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); |
455 | if (ret) | 455 | if (ret) |
456 | return ret; | 456 | return ret; |
457 | } | 457 | } |
458 | 458 | ||
459 | if (tb[TCA_NETEM_REORDER-1]) { | 459 | if (tb[TCA_NETEM_REORDER]) { |
460 | ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]); | 460 | ret = get_reorder(sch, tb[TCA_NETEM_REORDER]); |
461 | if (ret) | 461 | if (ret) |
462 | return ret; | 462 | return ret; |
463 | } | 463 | } |
464 | 464 | ||
465 | if (tb[TCA_NETEM_CORRUPT-1]) { | 465 | if (tb[TCA_NETEM_CORRUPT]) { |
466 | ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]); | 466 | ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); |
467 | if (ret) | 467 | if (ret) |
468 | return ret; | 468 | return ret; |
469 | } | 469 | } |
@@ -515,13 +515,13 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
515 | return qdisc_reshape_fail(nskb, sch); | 515 | return qdisc_reshape_fail(nskb, sch); |
516 | } | 516 | } |
517 | 517 | ||
518 | static int tfifo_init(struct Qdisc *sch, struct rtattr *opt) | 518 | static int tfifo_init(struct Qdisc *sch, struct nlattr *opt) |
519 | { | 519 | { |
520 | struct fifo_sched_data *q = qdisc_priv(sch); | 520 | struct fifo_sched_data *q = qdisc_priv(sch); |
521 | 521 | ||
522 | if (opt) { | 522 | if (opt) { |
523 | struct tc_fifo_qopt *ctl = RTA_DATA(opt); | 523 | struct tc_fifo_qopt *ctl = nla_data(opt); |
524 | if (RTA_PAYLOAD(opt) < sizeof(*ctl)) | 524 | if (nla_len(opt) < sizeof(*ctl)) |
525 | return -EINVAL; | 525 | return -EINVAL; |
526 | 526 | ||
527 | q->limit = ctl->limit; | 527 | q->limit = ctl->limit; |
@@ -537,10 +537,10 @@ static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
537 | struct fifo_sched_data *q = qdisc_priv(sch); | 537 | struct fifo_sched_data *q = qdisc_priv(sch); |
538 | struct tc_fifo_qopt opt = { .limit = q->limit }; | 538 | struct tc_fifo_qopt opt = { .limit = q->limit }; |
539 | 539 | ||
540 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 540 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
541 | return skb->len; | 541 | return skb->len; |
542 | 542 | ||
543 | rtattr_failure: | 543 | nla_put_failure: |
544 | return -1; | 544 | return -1; |
545 | } | 545 | } |
546 | 546 | ||
@@ -557,7 +557,7 @@ static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = { | |||
557 | .dump = tfifo_dump, | 557 | .dump = tfifo_dump, |
558 | }; | 558 | }; |
559 | 559 | ||
560 | static int netem_init(struct Qdisc *sch, struct rtattr *opt) | 560 | static int netem_init(struct Qdisc *sch, struct nlattr *opt) |
561 | { | 561 | { |
562 | struct netem_sched_data *q = qdisc_priv(sch); | 562 | struct netem_sched_data *q = qdisc_priv(sch); |
563 | int ret; | 563 | int ret; |
@@ -595,7 +595,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
595 | { | 595 | { |
596 | const struct netem_sched_data *q = qdisc_priv(sch); | 596 | const struct netem_sched_data *q = qdisc_priv(sch); |
597 | unsigned char *b = skb_tail_pointer(skb); | 597 | unsigned char *b = skb_tail_pointer(skb); |
598 | struct rtattr *rta = (struct rtattr *) b; | 598 | struct nlattr *nla = (struct nlattr *) b; |
599 | struct tc_netem_qopt qopt; | 599 | struct tc_netem_qopt qopt; |
600 | struct tc_netem_corr cor; | 600 | struct tc_netem_corr cor; |
601 | struct tc_netem_reorder reorder; | 601 | struct tc_netem_reorder reorder; |
@@ -607,26 +607,26 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
607 | qopt.loss = q->loss; | 607 | qopt.loss = q->loss; |
608 | qopt.gap = q->gap; | 608 | qopt.gap = q->gap; |
609 | qopt.duplicate = q->duplicate; | 609 | qopt.duplicate = q->duplicate; |
610 | RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); | 610 | NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); |
611 | 611 | ||
612 | cor.delay_corr = q->delay_cor.rho; | 612 | cor.delay_corr = q->delay_cor.rho; |
613 | cor.loss_corr = q->loss_cor.rho; | 613 | cor.loss_corr = q->loss_cor.rho; |
614 | cor.dup_corr = q->dup_cor.rho; | 614 | cor.dup_corr = q->dup_cor.rho; |
615 | RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); | 615 | NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); |
616 | 616 | ||
617 | reorder.probability = q->reorder; | 617 | reorder.probability = q->reorder; |
618 | reorder.correlation = q->reorder_cor.rho; | 618 | reorder.correlation = q->reorder_cor.rho; |
619 | RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); | 619 | NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); |
620 | 620 | ||
621 | corrupt.probability = q->corrupt; | 621 | corrupt.probability = q->corrupt; |
622 | corrupt.correlation = q->corrupt_cor.rho; | 622 | corrupt.correlation = q->corrupt_cor.rho; |
623 | RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); | 623 | NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); |
624 | 624 | ||
625 | rta->rta_len = skb_tail_pointer(skb) - b; | 625 | nla->nla_len = skb_tail_pointer(skb) - b; |
626 | 626 | ||
627 | return skb->len; | 627 | return skb->len; |
628 | 628 | ||
629 | rtattr_failure: | 629 | nla_put_failure: |
630 | nlmsg_trim(skb, b); | 630 | nlmsg_trim(skb, b); |
631 | return -1; | 631 | return -1; |
632 | } | 632 | } |
@@ -678,7 +678,7 @@ static void netem_put(struct Qdisc *sch, unsigned long arg) | |||
678 | } | 678 | } |
679 | 679 | ||
680 | static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | 680 | static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid, |
681 | struct rtattr **tca, unsigned long *arg) | 681 | struct nlattr **tca, unsigned long *arg) |
682 | { | 682 | { |
683 | return -ENOSYS; | 683 | return -ENOSYS; |
684 | } | 684 | } |
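
[Illustrative aside, not part of the patch] set_fifo_limit() above (and red_create_dflt() further down) hand-roll a single attribute in kmalloc()'d memory to drive a child qdisc's ->change() hook; the trick survives the conversion with RTA_LENGTH()/RTA_DATA() swapped for nla_attr_size()/nla_data(). A self-contained sketch (helper name invented):

#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Pass a new queue limit to a (b|p)fifo child qdisc via its ->change() hook. */
static int pass_limit_to_child(struct Qdisc *child, u32 limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;	/* fifo_init() never looks at the type */
		nla->nla_len  = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = child->ops->change(child, nla);
		kfree(nla);
	}
	return ret;
}
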
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 2243aaa8d851..a4f932df86e9 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -224,15 +224,15 @@ prio_destroy(struct Qdisc* sch) | |||
224 | qdisc_destroy(q->queues[prio]); | 224 | qdisc_destroy(q->queues[prio]); |
225 | } | 225 | } |
226 | 226 | ||
227 | static int prio_tune(struct Qdisc *sch, struct rtattr *opt) | 227 | static int prio_tune(struct Qdisc *sch, struct nlattr *opt) |
228 | { | 228 | { |
229 | struct prio_sched_data *q = qdisc_priv(sch); | 229 | struct prio_sched_data *q = qdisc_priv(sch); |
230 | struct tc_prio_qopt *qopt; | 230 | struct tc_prio_qopt *qopt; |
231 | struct rtattr *tb[TCA_PRIO_MAX]; | 231 | struct nlattr *tb[TCA_PRIO_MAX + 1]; |
232 | int i; | 232 | int i; |
233 | 233 | ||
234 | if (rtattr_parse_nested_compat(tb, TCA_PRIO_MAX, opt, qopt, | 234 | if (nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt, |
235 | sizeof(*qopt))) | 235 | sizeof(*qopt))) |
236 | return -EINVAL; | 236 | return -EINVAL; |
237 | q->bands = qopt->bands; | 237 | q->bands = qopt->bands; |
238 | /* If we're multiqueue, make sure the number of incoming bands | 238 | /* If we're multiqueue, make sure the number of incoming bands |
@@ -242,7 +242,7 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt) | |||
242 | * only one that is enabled for multiqueue, since it's the only one | 242 | * only one that is enabled for multiqueue, since it's the only one |
243 | * that interacts with the underlying device. | 243 | * that interacts with the underlying device. |
244 | */ | 244 | */ |
245 | q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]); | 245 | q->mq = nla_get_flag(tb[TCA_PRIO_MQ]); |
246 | if (q->mq) { | 246 | if (q->mq) { |
247 | if (sch->parent != TC_H_ROOT) | 247 | if (sch->parent != TC_H_ROOT) |
248 | return -EINVAL; | 248 | return -EINVAL; |
@@ -296,7 +296,7 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt) | |||
296 | return 0; | 296 | return 0; |
297 | } | 297 | } |
298 | 298 | ||
299 | static int prio_init(struct Qdisc *sch, struct rtattr *opt) | 299 | static int prio_init(struct Qdisc *sch, struct nlattr *opt) |
300 | { | 300 | { |
301 | struct prio_sched_data *q = qdisc_priv(sch); | 301 | struct prio_sched_data *q = qdisc_priv(sch); |
302 | int i; | 302 | int i; |
@@ -319,20 +319,24 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
319 | { | 319 | { |
320 | struct prio_sched_data *q = qdisc_priv(sch); | 320 | struct prio_sched_data *q = qdisc_priv(sch); |
321 | unsigned char *b = skb_tail_pointer(skb); | 321 | unsigned char *b = skb_tail_pointer(skb); |
322 | struct rtattr *nest; | 322 | struct nlattr *nest; |
323 | struct tc_prio_qopt opt; | 323 | struct tc_prio_qopt opt; |
324 | 324 | ||
325 | opt.bands = q->bands; | 325 | opt.bands = q->bands; |
326 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); | 326 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); |
327 | 327 | ||
328 | nest = RTA_NEST_COMPAT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 328 | nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); |
329 | if (q->mq) | 329 | if (nest == NULL) |
330 | RTA_PUT_FLAG(skb, TCA_PRIO_MQ); | 330 | goto nla_put_failure; |
331 | RTA_NEST_COMPAT_END(skb, nest); | 331 | if (q->mq) { |
332 | if (nla_put_flag(skb, TCA_PRIO_MQ) < 0) | ||
333 | goto nla_put_failure; | ||
334 | } | ||
335 | nla_nest_compat_end(skb, nest); | ||
332 | 336 | ||
333 | return skb->len; | 337 | return skb->len; |
334 | 338 | ||
335 | rtattr_failure: | 339 | nla_put_failure: |
336 | nlmsg_trim(skb, b); | 340 | nlmsg_trim(skb, b); |
337 | return -1; | 341 | return -1; |
338 | } | 342 | } |
@@ -392,7 +396,7 @@ static void prio_put(struct Qdisc *q, unsigned long cl) | |||
392 | return; | 396 | return; |
393 | } | 397 | } |
394 | 398 | ||
395 | static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg) | 399 | static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg) |
396 | { | 400 | { |
397 | unsigned long cl = *arg; | 401 | unsigned long cl = *arg; |
398 | struct prio_sched_data *q = qdisc_priv(sch); | 402 | struct prio_sched_data *q = qdisc_priv(sch); |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index f1e9647f7db7..6ce8da5aca0b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -177,21 +177,21 @@ static void red_destroy(struct Qdisc *sch) | |||
177 | static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) | 177 | static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) |
178 | { | 178 | { |
179 | struct Qdisc *q; | 179 | struct Qdisc *q; |
180 | struct rtattr *rta; | 180 | struct nlattr *nla; |
181 | int ret; | 181 | int ret; |
182 | 182 | ||
183 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | 183 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, |
184 | TC_H_MAKE(sch->handle, 1)); | 184 | TC_H_MAKE(sch->handle, 1)); |
185 | if (q) { | 185 | if (q) { |
186 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), | 186 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), |
187 | GFP_KERNEL); | 187 | GFP_KERNEL); |
188 | if (rta) { | 188 | if (nla) { |
189 | rta->rta_type = RTM_NEWQDISC; | 189 | nla->nla_type = RTM_NEWQDISC; |
190 | rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); | 190 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); |
191 | ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; | 191 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; |
192 | 192 | ||
193 | ret = q->ops->change(q, rta); | 193 | ret = q->ops->change(q, nla); |
194 | kfree(rta); | 194 | kfree(nla); |
195 | 195 | ||
196 | if (ret == 0) | 196 | if (ret == 0) |
197 | return q; | 197 | return q; |
@@ -201,23 +201,23 @@ static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) | |||
201 | return NULL; | 201 | return NULL; |
202 | } | 202 | } |
203 | 203 | ||
204 | static int red_change(struct Qdisc *sch, struct rtattr *opt) | 204 | static int red_change(struct Qdisc *sch, struct nlattr *opt) |
205 | { | 205 | { |
206 | struct red_sched_data *q = qdisc_priv(sch); | 206 | struct red_sched_data *q = qdisc_priv(sch); |
207 | struct rtattr *tb[TCA_RED_MAX]; | 207 | struct nlattr *tb[TCA_RED_MAX + 1]; |
208 | struct tc_red_qopt *ctl; | 208 | struct tc_red_qopt *ctl; |
209 | struct Qdisc *child = NULL; | 209 | struct Qdisc *child = NULL; |
210 | 210 | ||
211 | if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt)) | 211 | if (opt == NULL || nla_parse_nested(tb, TCA_RED_MAX, opt, NULL)) |
212 | return -EINVAL; | 212 | return -EINVAL; |
213 | 213 | ||
214 | if (tb[TCA_RED_PARMS-1] == NULL || | 214 | if (tb[TCA_RED_PARMS] == NULL || |
215 | RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) || | 215 | nla_len(tb[TCA_RED_PARMS]) < sizeof(*ctl) || |
216 | tb[TCA_RED_STAB-1] == NULL || | 216 | tb[TCA_RED_STAB] == NULL || |
217 | RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE) | 217 | nla_len(tb[TCA_RED_STAB]) < RED_STAB_SIZE) |
218 | return -EINVAL; | 218 | return -EINVAL; |
219 | 219 | ||
220 | ctl = RTA_DATA(tb[TCA_RED_PARMS-1]); | 220 | ctl = nla_data(tb[TCA_RED_PARMS]); |
221 | 221 | ||
222 | if (ctl->limit > 0) { | 222 | if (ctl->limit > 0) { |
223 | child = red_create_dflt(sch, ctl->limit); | 223 | child = red_create_dflt(sch, ctl->limit); |
@@ -235,7 +235,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) | |||
235 | 235 | ||
236 | red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, | 236 | red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, |
237 | ctl->Plog, ctl->Scell_log, | 237 | ctl->Plog, ctl->Scell_log, |
238 | RTA_DATA(tb[TCA_RED_STAB-1])); | 238 | nla_data(tb[TCA_RED_STAB])); |
239 | 239 | ||
240 | if (skb_queue_empty(&sch->q)) | 240 | if (skb_queue_empty(&sch->q)) |
241 | red_end_of_idle_period(&q->parms); | 241 | red_end_of_idle_period(&q->parms); |
@@ -244,7 +244,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) | |||
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | static int red_init(struct Qdisc* sch, struct rtattr *opt) | 247 | static int red_init(struct Qdisc* sch, struct nlattr *opt) |
248 | { | 248 | { |
249 | struct red_sched_data *q = qdisc_priv(sch); | 249 | struct red_sched_data *q = qdisc_priv(sch); |
250 | 250 | ||
@@ -255,7 +255,7 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt) | |||
255 | static int red_dump(struct Qdisc *sch, struct sk_buff *skb) | 255 | static int red_dump(struct Qdisc *sch, struct sk_buff *skb) |
256 | { | 256 | { |
257 | struct red_sched_data *q = qdisc_priv(sch); | 257 | struct red_sched_data *q = qdisc_priv(sch); |
258 | struct rtattr *opts = NULL; | 258 | struct nlattr *opts = NULL; |
259 | struct tc_red_qopt opt = { | 259 | struct tc_red_qopt opt = { |
260 | .limit = q->limit, | 260 | .limit = q->limit, |
261 | .flags = q->flags, | 261 | .flags = q->flags, |
@@ -266,12 +266,14 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
266 | .Scell_log = q->parms.Scell_log, | 266 | .Scell_log = q->parms.Scell_log, |
267 | }; | 267 | }; |
268 | 268 | ||
269 | opts = RTA_NEST(skb, TCA_OPTIONS); | 269 | opts = nla_nest_start(skb, TCA_OPTIONS); |
270 | RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); | 270 | if (opts == NULL) |
271 | return RTA_NEST_END(skb, opts); | 271 | goto nla_put_failure; |
272 | NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); | ||
273 | return nla_nest_end(skb, opts); | ||
272 | 274 | ||
273 | rtattr_failure: | 275 | nla_put_failure: |
274 | return RTA_NEST_CANCEL(skb, opts); | 276 | return nla_nest_cancel(skb, opts); |
275 | } | 277 | } |
276 | 278 | ||
277 | static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) | 279 | static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) |
@@ -332,7 +334,7 @@ static void red_put(struct Qdisc *sch, unsigned long arg) | |||
332 | } | 334 | } |
333 | 335 | ||
334 | static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | 336 | static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid, |
335 | struct rtattr **tca, unsigned long *arg) | 337 | struct nlattr **tca, unsigned long *arg) |
336 | { | 338 | { |
337 | return -ENOSYS; | 339 | return -ENOSYS; |
338 | } | 340 | } |
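In the red_dump() hunk the nested-attribute macros are replaced by explicit calls: nla_nest_start() may return NULL when the skb has no tailroom left, so the new code checks that itself instead of relying on RTA_NEST's hidden jump, and the failure label becomes nla_put_failure. A short sketch of the resulting control flow, under the same assumptions (2.6.25-era helpers; the wrapper function is hypothetical and the tc_red_qopt is taken as already filled in):

#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>

/* Hypothetical helper: emit one attribute inside a nested TCA_OPTIONS,
 * following the control flow of red_dump() after this patch. */
static int dump_nested_opt(struct sk_buff *skb, const struct tc_red_qopt *opt)
{
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)			/* no tailroom left in the skb */
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(*opt), opt);
	return nla_nest_end(skb, opts);		/* close the nest */

nla_put_failure:
	return nla_nest_cancel(skb, opts);	/* failure path, as in the hunk */
}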
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 25afe0f1d83a..91af539ab6e6 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -397,13 +397,13 @@ static void sfq_perturbation(unsigned long arg) | |||
397 | mod_timer(&q->perturb_timer, jiffies + q->perturb_period); | 397 | mod_timer(&q->perturb_timer, jiffies + q->perturb_period); |
398 | } | 398 | } |
399 | 399 | ||
400 | static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | 400 | static int sfq_change(struct Qdisc *sch, struct nlattr *opt) |
401 | { | 401 | { |
402 | struct sfq_sched_data *q = qdisc_priv(sch); | 402 | struct sfq_sched_data *q = qdisc_priv(sch); |
403 | struct tc_sfq_qopt *ctl = RTA_DATA(opt); | 403 | struct tc_sfq_qopt *ctl = nla_data(opt); |
404 | unsigned int qlen; | 404 | unsigned int qlen; |
405 | 405 | ||
406 | if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) | 406 | if (opt->nla_len < nla_attr_size(sizeof(*ctl))) |
407 | return -EINVAL; | 407 | return -EINVAL; |
408 | 408 | ||
409 | sch_tree_lock(sch); | 409 | sch_tree_lock(sch); |
@@ -426,7 +426,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | |||
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | 428 | ||
429 | static int sfq_init(struct Qdisc *sch, struct rtattr *opt) | 429 | static int sfq_init(struct Qdisc *sch, struct nlattr *opt) |
430 | { | 430 | { |
431 | struct sfq_sched_data *q = qdisc_priv(sch); | 431 | struct sfq_sched_data *q = qdisc_priv(sch); |
432 | int i; | 432 | int i; |
@@ -481,11 +481,11 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
481 | opt.divisor = SFQ_HASH_DIVISOR; | 481 | opt.divisor = SFQ_HASH_DIVISOR; |
482 | opt.flows = q->limit; | 482 | opt.flows = q->limit; |
483 | 483 | ||
484 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 484 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
485 | 485 | ||
486 | return skb->len; | 486 | return skb->len; |
487 | 487 | ||
488 | rtattr_failure: | 488 | nla_put_failure: |
489 | nlmsg_trim(skb, b); | 489 | nlmsg_trim(skb, b); |
490 | return -1; | 490 | return -1; |
491 | } | 491 | } |
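sch_sfq.c keeps its whole configuration as one flat struct tc_sfq_qopt in TCA_OPTIONS, so the conversion only touches the length check (opt->nla_len compared against nla_attr_size()) and the dump labels (NLA_PUT now jumps to nla_put_failure). A minimal sketch of the flat-attribute dump path, with the same caveats as above (2.6.25-era helpers, hypothetical helper name):

#include <linux/pkt_sched.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

/* Hypothetical helper: emit a flat tc_sfq_qopt as TCA_OPTIONS,
 * mirroring sfq_dump() after this patch. */
static int dump_flat_qopt(struct sk_buff *skb, const struct tc_sfq_qopt *opt)
{
	unsigned char *b = skb_tail_pointer(skb);

	NLA_PUT(skb, TCA_OPTIONS, sizeof(*opt), opt);	/* jumps on failure */
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);	/* drop whatever was partially written */
	return -1;
}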
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index d88fea9d6b61..6c4ad7e677b3 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -245,20 +245,21 @@ static void tbf_reset(struct Qdisc* sch) | |||
245 | static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) | 245 | static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) |
246 | { | 246 | { |
247 | struct Qdisc *q; | 247 | struct Qdisc *q; |
248 | struct rtattr *rta; | 248 | struct nlattr *nla; |
249 | int ret; | 249 | int ret; |
250 | 250 | ||
251 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | 251 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, |
252 | TC_H_MAKE(sch->handle, 1)); | 252 | TC_H_MAKE(sch->handle, 1)); |
253 | if (q) { | 253 | if (q) { |
254 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | 254 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), |
255 | if (rta) { | 255 | GFP_KERNEL); |
256 | rta->rta_type = RTM_NEWQDISC; | 256 | if (nla) { |
257 | rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); | 257 | nla->nla_type = RTM_NEWQDISC; |
258 | ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; | 258 | nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt)); |
259 | ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit; | ||
259 | 260 | ||
260 | ret = q->ops->change(q, rta); | 261 | ret = q->ops->change(q, nla); |
261 | kfree(rta); | 262 | kfree(nla); |
262 | 263 | ||
263 | if (ret == 0) | 264 | if (ret == 0) |
264 | return q; | 265 | return q; |
@@ -269,30 +270,30 @@ static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) | |||
269 | return NULL; | 270 | return NULL; |
270 | } | 271 | } |
271 | 272 | ||
272 | static int tbf_change(struct Qdisc* sch, struct rtattr *opt) | 273 | static int tbf_change(struct Qdisc* sch, struct nlattr *opt) |
273 | { | 274 | { |
274 | int err = -EINVAL; | 275 | int err = -EINVAL; |
275 | struct tbf_sched_data *q = qdisc_priv(sch); | 276 | struct tbf_sched_data *q = qdisc_priv(sch); |
276 | struct rtattr *tb[TCA_TBF_PTAB]; | 277 | struct nlattr *tb[TCA_TBF_PTAB + 1]; |
277 | struct tc_tbf_qopt *qopt; | 278 | struct tc_tbf_qopt *qopt; |
278 | struct qdisc_rate_table *rtab = NULL; | 279 | struct qdisc_rate_table *rtab = NULL; |
279 | struct qdisc_rate_table *ptab = NULL; | 280 | struct qdisc_rate_table *ptab = NULL; |
280 | struct Qdisc *child = NULL; | 281 | struct Qdisc *child = NULL; |
281 | int max_size,n; | 282 | int max_size,n; |
282 | 283 | ||
283 | if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) || | 284 | if (nla_parse_nested(tb, TCA_TBF_PTAB, opt, NULL) || |
284 | tb[TCA_TBF_PARMS-1] == NULL || | 285 | tb[TCA_TBF_PARMS] == NULL || |
285 | RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt)) | 286 | nla_len(tb[TCA_TBF_PARMS]) < sizeof(*qopt)) |
286 | goto done; | 287 | goto done; |
287 | 288 | ||
288 | qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]); | 289 | qopt = nla_data(tb[TCA_TBF_PARMS]); |
289 | rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]); | 290 | rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]); |
290 | if (rtab == NULL) | 291 | if (rtab == NULL) |
291 | goto done; | 292 | goto done; |
292 | 293 | ||
293 | if (qopt->peakrate.rate) { | 294 | if (qopt->peakrate.rate) { |
294 | if (qopt->peakrate.rate > qopt->rate.rate) | 295 | if (qopt->peakrate.rate > qopt->rate.rate) |
295 | ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]); | 296 | ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]); |
296 | if (ptab == NULL) | 297 | if (ptab == NULL) |
297 | goto done; | 298 | goto done; |
298 | } | 299 | } |
@@ -339,7 +340,7 @@ done: | |||
339 | return err; | 340 | return err; |
340 | } | 341 | } |
341 | 342 | ||
342 | static int tbf_init(struct Qdisc* sch, struct rtattr *opt) | 343 | static int tbf_init(struct Qdisc* sch, struct nlattr *opt) |
343 | { | 344 | { |
344 | struct tbf_sched_data *q = qdisc_priv(sch); | 345 | struct tbf_sched_data *q = qdisc_priv(sch); |
345 | 346 | ||
@@ -371,11 +372,11 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
371 | { | 372 | { |
372 | struct tbf_sched_data *q = qdisc_priv(sch); | 373 | struct tbf_sched_data *q = qdisc_priv(sch); |
373 | unsigned char *b = skb_tail_pointer(skb); | 374 | unsigned char *b = skb_tail_pointer(skb); |
374 | struct rtattr *rta; | 375 | struct nlattr *nla; |
375 | struct tc_tbf_qopt opt; | 376 | struct tc_tbf_qopt opt; |
376 | 377 | ||
377 | rta = (struct rtattr*)b; | 378 | nla = (struct nlattr*)b; |
378 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 379 | NLA_PUT(skb, TCA_OPTIONS, 0, NULL); |
379 | 380 | ||
380 | opt.limit = q->limit; | 381 | opt.limit = q->limit; |
381 | opt.rate = q->R_tab->rate; | 382 | opt.rate = q->R_tab->rate; |
@@ -385,12 +386,12 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
385 | memset(&opt.peakrate, 0, sizeof(opt.peakrate)); | 386 | memset(&opt.peakrate, 0, sizeof(opt.peakrate)); |
386 | opt.mtu = q->mtu; | 387 | opt.mtu = q->mtu; |
387 | opt.buffer = q->buffer; | 388 | opt.buffer = q->buffer; |
388 | RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); | 389 | NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); |
389 | rta->rta_len = skb_tail_pointer(skb) - b; | 390 | nla->nla_len = skb_tail_pointer(skb) - b; |
390 | 391 | ||
391 | return skb->len; | 392 | return skb->len; |
392 | 393 | ||
393 | rtattr_failure: | 394 | nla_put_failure: |
394 | nlmsg_trim(skb, b); | 395 | nlmsg_trim(skb, b); |
395 | return -1; | 396 | return -1; |
396 | } | 397 | } |
@@ -442,7 +443,7 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg) | |||
442 | } | 443 | } |
443 | 444 | ||
444 | static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | 445 | static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, |
445 | struct rtattr **tca, unsigned long *arg) | 446 | struct nlattr **tca, unsigned long *arg) |
446 | { | 447 | { |
447 | return -ENOSYS; | 448 | return -ENOSYS; |
448 | } | 449 | } |
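The tbf_change() hunk shows the indexing change that recurs throughout the patch: nla_parse_nested() fills tb[0..TCA_TBF_PTAB], so the array is declared one slot larger and attributes are read at their real index, where the rtattr code used tb[ATTR - 1] on a TCA_TBF_PTAB-sized array. A hedged sketch of the new parse step (policy still NULL, as everywhere in this patch; the helper name is hypothetical):

#include <linux/pkt_sched.h>
#include <net/netlink.h>

/* Hypothetical helper: validate TCA_OPTIONS for TBF as tbf_change() now does. */
static int parse_tbf_opts(struct nlattr *opt, struct tc_tbf_qopt **qoptp)
{
	struct nlattr *tb[TCA_TBF_PTAB + 1];	/* slots 0 .. TCA_TBF_PTAB */

	if (nla_parse_nested(tb, TCA_TBF_PTAB, opt, NULL) ||
	    tb[TCA_TBF_PARMS] == NULL ||		/* direct index, no "- 1" */
	    nla_len(tb[TCA_TBF_PARMS]) < sizeof(struct tc_tbf_qopt))
		return -EINVAL;

	*qoptp = nla_data(tb[TCA_TBF_PARMS]);
	return 0;
}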
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index c0ed06d4a504..1411c7b1fbdc 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -168,7 +168,7 @@ teql_destroy(struct Qdisc* sch) | |||
168 | } | 168 | } |
169 | } | 169 | } |
170 | 170 | ||
171 | static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt) | 171 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) |
172 | { | 172 | { |
173 | struct net_device *dev = sch->dev; | 173 | struct net_device *dev = sch->dev; |
174 | struct teql_master *m = (struct teql_master*)sch->ops; | 174 | struct teql_master *m = (struct teql_master*)sch->ops; |