author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-25 14:17:34 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-25 14:17:34 -0500
commit    4ba9920e5e9c0e16b5ed24292d45322907bb9035 (patch)
tree      7d023baea59ed0886ded1f0b6d1c6385690b88f7 /net/sched/cls_api.c
parent    82c477669a4665eb4e52030792051e0559ee2a36 (diff)
parent    8b662fe70c68282f78482dc272df0c4f355e49f5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BPF debugger and asm tool by Daniel Borkmann.

 2) Speed up create/bind in AF_PACKET, also from Daniel Borkmann.

 3) Correct reciprocal_divide and update users, from Hannes Frederic
    Sowa and Daniel Borkmann.

 4) Currently we only have a "set" operation for the hw timestamp socket
    ioctl, add a "get" operation to match.  From Ben Hutchings.

 5) Add better trace events for debugging driver datapath problems, also
    from Ben Hutchings.

 6) Implement auto corking in TCP, from Eric Dumazet.  Basically, if we
    have a small send and a previous packet is already in the qdisc or
    device queue, defer until TX completion or we get more data.

 7) Allow userspace to manage ipv6 temporary addresses, from Jiri Pirko.

 8) Add a qdisc bypass option for AF_PACKET sockets, from Daniel
    Borkmann.

 9) Share IP header compression code between Bluetooth and IEEE802154
    layers, from Jukka Rissanen.

10) Fix ipv6 router reachability probing, from Jiri Benc.

11) Allow packets to be captured on macvtap devices, from Vlad Yasevich.

12) Support tunneling in GRO layer, from Jerry Chu.

13) Allow bonding to be configured fully using netlink, from Scott
    Feldman.

14) Allow AF_PACKET users to obtain the VLAN TPID, just like they can
    already get the TCI.  From Atzm Watanabe.

15) New "Heavy Hitter" qdisc, from Terry Lam.

16) Significantly improve the IPSEC support in pktgen, from Fan Du.

17) Allow ipv4 tunnels to cache routes, just like sockets.  From Tom
    Herbert.

18) Add Proportional Integral Enhanced packet scheduler, from Vijay
    Subramanian.

19) Allow openvswitch to mmap'd netlink, from Thomas Graf.

20) Key TCP metrics blobs also by source address, not just destination
    address.  From Christoph Paasch.

21) Support 10G in generic phylib.  From Andy Fleming.

22) Try to short-circuit GRO flow compares using device provided RX
    hash, if provided.  From Tom Herbert.

The wireless and netfilter folks have been busy little bees too.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2064 commits)
  net/cxgb4: Fix referencing freed adapter
  ipv6: reallocate addrconf router for ipv6 address when lo device up
  fib_frontend: fix possible NULL pointer dereference
  rtnetlink: remove IFLA_BOND_SLAVE definition
  rtnetlink: remove check for fill_slave_info in rtnl_have_link_slave_info
  qlcnic: update version to 5.3.55
  qlcnic: Enhance logic to calculate msix vectors.
  qlcnic: Refactor interrupt coalescing code for all adapters.
  qlcnic: Update poll controller code path
  qlcnic: Interrupt code cleanup
  qlcnic: Enhance Tx timeout debugging.
  qlcnic: Use bool for rx_mac_learn.
  bonding: fix u64 division
  rtnetlink: add missing IFLA_BOND_AD_INFO_UNSPEC
  sfc: Use the correct maximum TX DMA ring size for SFC9100
  Add Shradha Shah as the sfc driver maintainer.
  net/vxlan: Share RX skb de-marking and checksum checks with ovs
  tulip: cleanup by using ARRAY_SIZE()
  ip_tunnel: clear IPCB in ip_tunnel_xmit() in case dst_link_failure() is called
  net/cxgb4: Don't retrieve stats during recovery
  ...
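
An aside on item 6: the auto-corking rule is compact enough to sketch in code. The stand-alone C fragment below mirrors the decision as described above; every name in it is illustrative, and it is not the kernel's actual implementation.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical per-socket state, just for the sketch. */
struct autocork_state {
        size_t write_len;           /* bytes in the current send */
        size_t mss;                 /* only sub-MSS writes are deferred */
        unsigned int qdisc_pkts;    /* our packets still in the qdisc */
        unsigned int device_pkts;   /* our packets still in the NIC queue */
};

/* Defer a small write while earlier data is still queued; the later
 * TX completion (or more data arriving) flushes what was corked. */
static bool should_autocork(const struct autocork_state *s)
{
        if (s->write_len >= s->mss)
                return false;
        return s->qdisc_pkts + s->device_pkts > 0;
}

int main(void)
{
        struct autocork_state s = { .write_len = 100, .mss = 1460,
                                    .qdisc_pkts = 1, .device_pkts = 0 };
        return should_autocork(&s) ? 0 : 1;    /* corks: returns 0 */
}
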
Diffstat (limited to 'net/sched/cls_api.c')
-rw-r--r--  net/sched/cls_api.c  |  130
1 file changed, 60 insertions(+), 70 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8e118af90973..29a30a14c315 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,8 +31,7 @@
 #include <net/pkt_cls.h>
 
 /* The list of all installed classifier types */
-
-static struct tcf_proto_ops *tcf_proto_base __read_mostly;
+static LIST_HEAD(tcf_proto_base);
 
 /* Protects list of registered TC modules. It is pure SMP lock. */
 static DEFINE_RWLOCK(cls_mod_lock);
@@ -41,36 +40,35 @@ static DEFINE_RWLOCK(cls_mod_lock);
 
 static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
 {
-        const struct tcf_proto_ops *t = NULL;
+        const struct tcf_proto_ops *t, *res = NULL;
 
         if (kind) {
                 read_lock(&cls_mod_lock);
-                for (t = tcf_proto_base; t; t = t->next) {
+                list_for_each_entry(t, &tcf_proto_base, head) {
                         if (nla_strcmp(kind, t->kind) == 0) {
-                                if (!try_module_get(t->owner))
-                                        t = NULL;
+                                if (try_module_get(t->owner))
+                                        res = t;
                                 break;
                         }
                 }
                 read_unlock(&cls_mod_lock);
         }
-        return t;
+        return res;
 }
 
 /* Register(unregister) new classifier type */
 
 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
 {
-        struct tcf_proto_ops *t, **tp;
+        struct tcf_proto_ops *t;
         int rc = -EEXIST;
 
         write_lock(&cls_mod_lock);
-        for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
+        list_for_each_entry(t, &tcf_proto_base, head)
                 if (!strcmp(ops->kind, t->kind))
                         goto out;
 
-        ops->next = NULL;
-        *tp = ops;
+        list_add_tail(&ops->head, &tcf_proto_base);
         rc = 0;
 out:
         write_unlock(&cls_mod_lock);
@@ -80,19 +78,17 @@ EXPORT_SYMBOL(register_tcf_proto_ops);
 
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 {
-        struct tcf_proto_ops *t, **tp;
+        struct tcf_proto_ops *t;
         int rc = -ENOENT;
 
         write_lock(&cls_mod_lock);
-        for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
-                if (t == ops)
+        list_for_each_entry(t, &tcf_proto_base, head) {
+                if (t == ops) {
+                        list_del(&t->head);
+                        rc = 0;
                         break;
-
-        if (!t)
-                goto out;
-        *tp = t->next;
-        rc = 0;
-out:
+                }
+        }
         write_unlock(&cls_mod_lock);
         return rc;
 }
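
The three hunks above replace a hand-rolled singly linked list of classifier ops (explicit *tp / t->next pointer surgery) with the kernel's intrusive list_head. Below is a stand-alone userspace sketch of the resulting registration pattern; the list helpers are minimal reimplementations included only so the example compiles on its own, not the kernel's <linux/list.h>.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Minimal stand-ins for the kernel's list helpers. */
struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member)                         \
        for (pos = container_of((head)->next, typeof(*pos), member);   \
             &pos->member != (head);                                   \
             pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* The pattern the patch adopts: the ops struct embeds its list node. */
struct proto_ops { const char *kind; struct list_head head; };
static LIST_HEAD(proto_base);

static int register_ops(struct proto_ops *ops)
{
        struct proto_ops *t;

        list_for_each_entry(t, &proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        return -1;      /* -EEXIST in the kernel */
        list_add_tail(&ops->head, &proto_base);
        return 0;
}

int main(void)
{
        struct proto_ops u32_ops = { .kind = "u32" };
        struct proto_ops fw_ops  = { .kind = "fw" };

        printf("%d %d %d\n", register_ops(&u32_ops),
               register_ops(&fw_ops), register_ops(&u32_ops));
        return 0;       /* prints "0 0 -1": the duplicate is rejected */
}
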
@@ -344,7 +340,7 @@ errout:
 	return err;
 }
 
-static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
+static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcf_proto *tp,
 			 unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
 {
 	struct tcmsg *tcm;
@@ -366,7 +362,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
 	tcm->tcm_handle = fh;
 	if (RTM_DELTFILTER != event) {
 		tcm->tcm_handle = 0;
-		if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
+		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
 			goto nla_put_failure;
 	}
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -389,7 +385,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
+	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -408,8 +404,9 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 			 struct tcf_walker *arg)
 {
 	struct tcf_dump_args *a = (void *)arg;
+	struct net *net = sock_net(a->skb->sk);
 
-	return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
+	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
 }
 
@@ -467,7 +464,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 		if (t > s_t)
 			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
 		if (cb->args[1] == 0) {
-			if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid,
+			if (tcf_fill_node(net, skb, tp, 0, NETLINK_CB(cb->skb).portid,
 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					  RTM_NEWTFILTER) <= 0)
 				break;
@@ -500,46 +497,41 @@ out:
 void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (exts->action) {
-		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
-		exts->action = NULL;
-	}
+	tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
+	INIT_LIST_HEAD(&exts->actions);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-		      struct nlattr *rate_tlv, struct tcf_exts *exts,
-		      const struct tcf_ext_map *map)
+		      struct nlattr *rate_tlv, struct tcf_exts *exts)
 {
-	memset(exts, 0, sizeof(*exts));
-
 #ifdef CONFIG_NET_CLS_ACT
 	{
 		struct tc_action *act;
 
-		if (map->police && tb[map->police]) {
-			act = tcf_action_init_1(net, tb[map->police], rate_tlv,
+		INIT_LIST_HEAD(&exts->actions);
+		if (exts->police && tb[exts->police]) {
+			act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
 						"police", TCA_ACT_NOREPLACE,
 						TCA_ACT_BIND);
 			if (IS_ERR(act))
 				return PTR_ERR(act);
 
-			act->type = TCA_OLD_COMPAT;
-			exts->action = act;
-		} else if (map->action && tb[map->action]) {
-			act = tcf_action_init(net, tb[map->action], rate_tlv,
+			act->type = exts->type = TCA_OLD_COMPAT;
+			list_add(&act->list, &exts->actions);
+		} else if (exts->action && tb[exts->action]) {
+			int err;
+			err = tcf_action_init(net, tb[exts->action], rate_tlv,
 					      NULL, TCA_ACT_NOREPLACE,
-					      TCA_ACT_BIND);
-			if (IS_ERR(act))
-				return PTR_ERR(act);
-
-			exts->action = act;
+					      TCA_ACT_BIND, &exts->actions);
+			if (err)
+				return err;
 		}
 	}
 #else
-	if ((map->action && tb[map->action]) ||
-	    (map->police && tb[map->police]))
+	if ((exts->action && tb[exts->action]) ||
+	    (exts->police && tb[exts->police]))
 		return -EOPNOTSUPP;
 #endif
 
@@ -551,43 +543,44 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (src->action) {
-		struct tc_action *act;
+	if (!list_empty(&src->actions)) {
+		LIST_HEAD(tmp);
 		tcf_tree_lock(tp);
-		act = dst->action;
-		dst->action = src->action;
+		list_splice_init(&dst->actions, &tmp);
+		list_splice(&src->actions, &dst->actions);
 		tcf_tree_unlock(tp);
-		if (act)
-			tcf_action_destroy(act, TCA_ACT_UNBIND);
+		tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
 	}
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
-		  const struct tcf_ext_map *map)
+#define tcf_exts_first_act(ext) \
+		list_first_entry(&(exts)->actions, struct tc_action, list)
+
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (map->action && exts->action) {
+	if (exts->action && !list_empty(&exts->actions)) {
 		/*
 		 * again for backward compatible mode - we want
 		 * to work with both old and new modes of entering
 		 * tc data even if iproute2 was newer - jhs
 		 */
 		struct nlattr *nest;
-
-		if (exts->action->type != TCA_OLD_COMPAT) {
-			nest = nla_nest_start(skb, map->action);
+		if (exts->type != TCA_OLD_COMPAT) {
+			nest = nla_nest_start(skb, exts->action);
 			if (nest == NULL)
 				goto nla_put_failure;
-			if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
+			if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
 				goto nla_put_failure;
 			nla_nest_end(skb, nest);
-		} else if (map->police) {
-			nest = nla_nest_start(skb, map->police);
-			if (nest == NULL)
+		} else if (exts->police) {
+			struct tc_action *act = tcf_exts_first_act(exts);
+			nest = nla_nest_start(skb, exts->police);
+			if (nest == NULL || !act)
 				goto nla_put_failure;
-			if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
+			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
 				goto nla_put_failure;
 			nla_nest_end(skb, nest);
 		}
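
The tcf_exts_change() hunk above is a textbook replace-then-free: swap the action lists while holding the lock, then destroy the displaced entries only after dropping it, keeping the critical section short. A stand-alone sketch of that pattern follows, with a pthread mutex standing in for tcf_tree_lock() and a minimal list_splice_init(); note the kernel pairs list_splice_init() with plain list_splice(), while this sketch uses the _init variant twice so both source lists stay valid afterwards.

#include <pthread.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static int list_empty(const struct list_head *h) { return h->next == h; }

/* Move the whole chain of @list onto @head; leave @list empty. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
        struct list_head *first = list->next, *last = list->prev;

        if (list_empty(list))
                return;
        first->prev = head;
        last->next = head->next;
        head->next->prev = last;
        head->next = first;
        list->next = list->prev = list;
}

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Swap in the new action list under the lock; free the old one after. */
static void exts_change(struct list_head *dst, struct list_head *src)
{
        LIST_HEAD(tmp);

        pthread_mutex_lock(&tree_lock);
        list_splice_init(dst, &tmp);    /* old actions -> tmp */
        list_splice_init(src, dst);     /* new actions -> dst */
        pthread_mutex_unlock(&tree_lock);

        /* destroy_actions(&tmp) would run here, outside the lock,
         * mirroring tcf_action_destroy(&tmp, TCA_ACT_UNBIND) above. */
        (void)tmp;
}

int main(void)
{
        LIST_HEAD(dst);
        LIST_HEAD(src);
        struct list_head node = { &src, &src };

        src.next = src.prev = &node;    /* src holds one entry */
        exts_change(&dst, &src);
        return list_empty(&dst);        /* 0: dst took the entry */
}
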
@@ -600,17 +593,14 @@ nla_put_failure: __attribute__ ((unused))
 EXPORT_SYMBOL(tcf_exts_dump);
 
 
-int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
-			const struct tcf_ext_map *map)
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (exts->action)
-		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
-			goto nla_put_failure;
+	struct tc_action *a = tcf_exts_first_act(exts);
+	if (tcf_action_copy_stats(skb, a, 1) < 0)
+		return -1;
 #endif
 	return 0;
-nla_put_failure: __attribute__ ((unused))
-	return -1;
 }
 EXPORT_SYMBOL(tcf_exts_dump_stats);
 
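
The tcf_exts_first_act() helper the last two hunks rely on is just list_first_entry(), i.e. container_of() applied to head->next: it recovers a typed pointer to the enclosing struct from the embedded list node. A stand-alone illustration, with simplified stand-ins for the kernel macros:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Recover the enclosing struct from a pointer to its embedded member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

struct tc_action_sketch { int order; struct list_head list; };

int main(void)
{
        struct list_head head = { &head, &head };
        struct tc_action_sketch act = { .order = 1 };

        /* Link act in at the front of the (previously empty) list. */
        act.list.next = head.next;
        act.list.prev = &head;
        head.next->prev = &act.list;
        head.next = &act.list;

        struct tc_action_sketch *first =
                list_first_entry(&head, struct tc_action_sketch, list);
        printf("%d\n", first->order);   /* prints 1 */
        return 0;
}
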