author		Daniel Borkmann <daniel@iogearbox.net>	2015-09-23 15:56:47 -0400
committer	David S. Miller <davem@davemloft.net>	2015-09-23 17:29:02 -0400
commit		ef146fa40cc3af1dcb13688f41cc06a9c0deb01c (patch)
tree		ae47e40a2cd044d23f2317e0e8eee01e4e5ffe8b /net/sched
parent		bf007d1c75aceb4e8a407f325d42ee9958049a31 (diff)
cls_bpf: make binding to classid optional
The binding to a particular classid was so far always mandatory for cls_bpf, but it doesn't need to be. Therefore, lift this restriction as similarly done in other classifiers. Only a couple of qdiscs make use of class from the tcf_result, others don't strictly care, so let the user choose his needs (those that read out class can handle situations where it could be NULL).

An explicit check for tcf_unbind_filter() is also not needed here, as the previous r->class was 0, so the xchg() will return that and therefore a callback to the qdisc's unbind_tcf() is skipped.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
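To illustrate the unbind reasoning above: the unbind helper swaps out the old class value atomically and only invokes the qdisc's unbind_tcf() callback when that previous value was non-zero. Below is a simplified, paraphrased sketch of that pattern, not the verbatim helper from include/net/pkt_cls.h; the function name is illustrative and locking details are omitted.

/*
 * Paraphrased sketch of the tcf_unbind_filter() pattern referred to in the
 * commit message; see include/net/pkt_cls.h for the real implementation.
 */
static inline void unbind_filter_sketch(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	/* Atomically clear r->class and fetch the previous binding. */
	cl = xchg(&r->class, 0);

	/*
	 * A cls_bpf filter created without TCA_BPF_CLASSID never called
	 * tcf_bind_filter(), so r->class is still 0 here: xchg() returns 0
	 * and the qdisc's unbind_tcf() callback is simply skipped, which is
	 * why no extra check is needed in cls_bpf.
	 */
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}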
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/cls_bpf.c	17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 7d9241545188..d6c0a0b44afb 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -307,14 +307,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 {
 	bool is_bpf, is_ebpf, have_exts = false;
 	struct tcf_exts exts;
-	u32 classid;
 	int ret;
 
 	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
 	is_ebpf = tb[TCA_BPF_FD];
-
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-	    !tb[TCA_BPF_CLASSID])
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
 		return -EINVAL;
 
 	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
@@ -322,7 +319,6 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	if (ret < 0)
 		return ret;
 
-	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
 	if (tb[TCA_BPF_FLAGS]) {
 		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
 
@@ -334,7 +330,6 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
 	}
 
-	prog->res.classid = classid;
 	prog->exts_integrated = have_exts;
 
 	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
@@ -344,9 +339,12 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 		return ret;
 	}
 
-	tcf_bind_filter(tp, &prog->res, base);
-	tcf_exts_change(tp, &prog->exts, &exts);
+	if (tb[TCA_BPF_CLASSID]) {
+		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+		tcf_bind_filter(tp, &prog->res, base);
+	}
 
+	tcf_exts_change(tp, &prog->exts, &exts);
 	return 0;
 }
 
@@ -479,7 +477,8 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
+	if (prog->res.classid &&
+	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
 		goto nla_put_failure;
 
 	if (cls_bpf_is_ebpf(prog))
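On the commit message's point that qdiscs reading the class out of the tcf_result can cope with it being unset: a classful qdisc's classify path typically falls back to a lookup by classid, or to its default class, when res.class is 0. The following is a hypothetical sketch of such a fallback, loosely modeled on how classful qdiscs resolve a filter result; my_class, my_find and my_default are illustrative names, not kernel APIs.

/*
 * Hypothetical sketch: resolving the class selected by a filter while
 * tolerating a tcf_result that was never bound to a class (res->class == 0),
 * e.g. a cls_bpf filter attached without TCA_BPF_CLASSID after this patch.
 */
static struct my_class *my_resolve_class(struct Qdisc *sch, struct tcf_result *res)
{
	struct my_class *cl = (struct my_class *)res->class;

	if (!cl)
		/* Filter matched but never called tcf_bind_filter(); try the
		 * classid the result may still carry. */
		cl = my_find(sch, res->classid);
	if (!cl)
		cl = my_default(sch);	/* fall back to the qdisc's default class */
	return cl;
}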