author	Patrick McHardy <kaber@trash.net>	2008-01-23 23:36:12 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:11:23 -0500
commit	6fa8c0144b770dac941cf2c15053b6e24f046c8a (patch)
tree	7c91cd0e9a98f0532df30a9d8549e2c76a3ce716 /net/sched/cls_tcindex.c
parent	27a3421e4821734bc19496faa77b380605dc3b23 (diff)
[NET_SCHED]: Use nla_policy for attribute validation in classifiers
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_tcindex.c')
-rw-r--r--	net/sched/cls_tcindex.c	31
1 file changed, 13 insertions(+), 18 deletions(-)
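
For context, a minimal, hypothetical sketch of the pattern this patch adopts: a static nla_policy table declares each attribute's expected netlink type, and nla_parse_nested() enforces it while parsing, so the open-coded nla_len() checks removed below become unnecessary. The helper example_parse() is illustrative only and is not part of the patch; it assumes the four-argument nla_parse_nested() signature used by the kernel at the time of this commit.

#include <net/netlink.h>
#include <linux/pkt_cls.h>

/* Hypothetical policy table mirroring the one added by this patch:
 * each entry records the expected attribute type, and the parser
 * rejects attributes that are too short for that type. */
static const struct nla_policy example_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH] = { .type = NLA_U32 },
	[TCA_TCINDEX_MASK] = { .type = NLA_U16 },
};

/* Illustrative helper (not in the patch): parse a nested options
 * attribute against the policy, then read the fields without any
 * hand-rolled length checks. */
static int example_parse(struct nlattr *opt, u32 *hash, u16 *mask)
{
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	int err;

	/* Undersized or mistyped attributes are rejected here. */
	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, example_policy);
	if (err < 0)
		return err;

	if (tb[TCA_TCINDEX_HASH])
		*hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
	if (tb[TCA_TCINDEX_MASK])
		*mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
	return 0;
}
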
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 28098564b4d7..ee60b2d1705d 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -193,6 +193,14 @@ valid_perfect_hash(struct tcindex_data *p)
 	return p->hash > (p->mask >> p->shift);
 }
 
+static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
+	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
+	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
+};
+
 static int
 tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		  struct tcindex_data *p, struct tcindex_filter_result *r,
@@ -217,24 +225,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 	else
 		memset(&cr, 0, sizeof(cr));
 
-	err = -EINVAL;
-	if (tb[TCA_TCINDEX_HASH]) {
-		if (nla_len(tb[TCA_TCINDEX_HASH]) < sizeof(u32))
-			goto errout;
+	if (tb[TCA_TCINDEX_HASH])
 		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-	}
 
-	if (tb[TCA_TCINDEX_MASK]) {
-		if (nla_len(tb[TCA_TCINDEX_MASK]) < sizeof(u16))
-			goto errout;
+	if (tb[TCA_TCINDEX_MASK])
 		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-	}
 
-	if (tb[TCA_TCINDEX_SHIFT]) {
-		if (nla_len(tb[TCA_TCINDEX_SHIFT]) < sizeof(int))
-			goto errout;
+	if (tb[TCA_TCINDEX_SHIFT])
 		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-	}
 
 	err = -EBUSY;
 	/* Hash already allocated, make sure that we still meet the
@@ -248,11 +246,8 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		goto errout;
 
 	err = -EINVAL;
-	if (tb[TCA_TCINDEX_FALL_THROUGH]) {
-		if (nla_len(tb[TCA_TCINDEX_FALL_THROUGH]) < sizeof(u32))
-			goto errout;
+	if (tb[TCA_TCINDEX_FALL_THROUGH])
 		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
-	}
 
 	if (!cp.hash) {
 		/* Hash not specified, use perfect hash if the upper limit
@@ -358,7 +353,7 @@ tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
 	if (!opt)
 		return 0;
 
-	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, NULL);
+	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
 	if (err < 0)
 		return err;
 