Diffstat (limited to 'net/openvswitch/datapath.c')
-rw-r--r--  net/openvswitch/datapath.c | 77
1 file changed, 17 insertions(+), 60 deletions(-)
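This diff converts the datapath's flow table from an RCU-managed `struct flow_table *` pointer into a `struct flow_table` embedded directly in `struct datapath`, moving the RCU-swapped hash table down into an inner `struct table_instance` (visible below as `dp->table.ti`) and pushing the expand/rehash policy out of datapath.c. A minimal sketch of the assumed post-patch layout; only the `ti` member is confirmed by this diff, the remaining fields are inferences from the call sites below:

    /* Sketch of the assumed layout after this patch. Only 'ti' appears
     * in this diff (see ovs_flow_cmd_dump below); the other fields are
     * assumptions based on ovs_flow_tbl_count(), ovs_sw_flow_mask_insert()
     * and the removed dp->last_rehash handling. */
    struct table_instance;                     /* the actual hash buckets */

    struct flow_table {
            struct table_instance __rcu *ti;   /* swapped on expand/rehash */
            struct list_head mask_list;        /* assumed: shared mask list */
            unsigned long last_rehash;         /* assumed: moved from datapath */
            unsigned int count;                /* assumed: backs ovs_flow_tbl_count() */
    };

    struct datapath {
            /* ... */
            struct flow_table table;           /* embedded, no longer __rcu * */
            /* ... */
    };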
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 72e68743c643..60b9be3b9477 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -59,8 +59,6 @@
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-
 int ovs_net_id __read_mostly;
 
 static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
@@ -163,7 +161,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table, false);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
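The call sites throughout this diff imply a reworked flow-table API that takes the embedded table rather than a dereferenced pointer. The prototypes below are inferred from those call sites; exact return types beyond the init/flush error codes are assumptions:

    /* Prototypes implied by the call sites in this diff (assumed). */
    int  ovs_flow_tbl_init(struct flow_table *table);
    int  ovs_flow_tbl_flush(struct flow_table *table);
    void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
    u32  ovs_flow_tbl_count(struct flow_table *table);
    struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
                                        const struct sw_flow_key *key);
    void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
    void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
    struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                           u32 *bucket, u32 *idx);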
@@ -235,7 +233,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	}
 
 	/* Look up flow. */
-	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
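The receive path no longer RCU-dereferences the table itself, so the dereference of the inner instance presumably moved into the helper. A minimal sketch of that assumption; table_instance_lookup() is a hypothetical internal helper:

    struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                        const struct sw_flow_key *key)
    {
            /* Assumed: the dereference moved in here from the callers.
             * A real implementation likely needs a variant that also
             * tolerates ovs_mutex callers (flow_cmd_get/del below call
             * this under the lock), e.g. rcu_dereference_ovsl(). */
            struct table_instance *ti = rcu_dereference(tbl->ti);

            return table_instance_lookup(ti, key);  /* hypothetical helper */
    }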
@@ -453,23 +451,6 @@ out:
 	return err;
 }
 
-/* Called with ovs_mutex. */
-static int flush_flows(struct datapath *dp)
-{
-	struct flow_table *old_table;
-	struct flow_table *new_table;
-
-	old_table = ovsl_dereference(dp->table);
-	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
-	if (!new_table)
-		return -ENOMEM;
-
-	rcu_assign_pointer(dp->table, new_table);
-
-	ovs_flow_tbl_destroy(old_table, true);
-	return 0;
-}
-
 static void clear_stats(struct sw_flow *flow)
 {
 	flow->used = 0;
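The removed flush_flows() swapped in an entirely new flow_table; its replacement, ovs_flow_tbl_flush() (called from ovs_flow_cmd_del below), presumably performs the same swap one level down, on the table_instance, so the mask list survives a flush. A sketch under those assumptions; table_instance_alloc()/table_instance_destroy() are hypothetical names:

    /* Sketch of the assumed ovs_flow_tbl_flush(): the removed
     * flush_flows() logic, applied to the inner table_instance. */
    int ovs_flow_tbl_flush(struct flow_table *flow_table)
    {
            struct table_instance *old_ti = ovsl_dereference(flow_table->ti);
            struct table_instance *new_ti = table_instance_alloc(TBL_MIN_BUCKETS);

            if (!new_ti)
                    return -ENOMEM;

            rcu_assign_pointer(flow_table->ti, new_ti);
            flow_table->count = 0;                  /* assumed counter reset */
            table_instance_destroy(old_ti, true);   /* deferred (RCU) free */
            return 0;
    }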
@@ -584,11 +565,9 @@ static struct genl_ops dp_packet_genl_ops[] = {
 
 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 {
-	struct flow_table *table;
 	int i;
 
-	table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
-	stats->n_flows = ovs_flow_tbl_count(table);
+	stats->n_flows = ovs_flow_tbl_count(&dp->table);
 
 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
 	for_each_possible_cpu(i) {
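get_dp_stats() no longer needs rcu_dereference_check(): with the table embedded, the flow count is presumably a plain counter maintained at insert/remove time rather than something derived from the RCU-managed instance. A one-line sketch, assuming the count field from the layout sketch at the top:

    u32 ovs_flow_tbl_count(struct flow_table *table)
    {
            return table->count;    /* assumed running counter */
    }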
@@ -773,7 +752,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	struct sw_flow_mask mask;
 	struct sk_buff *reply;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_actions *acts = NULL;
 	struct sw_flow_match match;
 	int error;
@@ -814,12 +792,9 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	if (!dp)
 		goto err_unlock_ovs;
 
-	table = ovsl_dereference(dp->table);
-
 	/* Check if this is a duplicate flow */
-	flow = ovs_flow_tbl_lookup(table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
-		struct flow_table *new_table = NULL;
 		struct sw_flow_mask *mask_p;
 
 		/* Bail out if we're not allowed to create a new flow. */
@@ -827,19 +802,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
 			goto err_unlock_ovs;
 
-		/* Expand table, if necessary, to make room. */
-		if (ovs_flow_tbl_need_to_expand(table))
-			new_table = ovs_flow_tbl_expand(table);
-		else if (time_after(jiffies, dp->last_rehash + REHASH_FLOW_INTERVAL))
-			new_table = ovs_flow_tbl_rehash(table);
-
-		if (new_table && !IS_ERR(new_table)) {
-			rcu_assign_pointer(dp->table, new_table);
-			ovs_flow_tbl_destroy(table, true);
-			table = ovsl_dereference(dp->table);
-			dp->last_rehash = jiffies;
-		}
-
 		/* Allocate flow. */
 		flow = ovs_flow_alloc();
 		if (IS_ERR(flow)) {
@@ -852,7 +814,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		flow->unmasked_key = key;
 
 		/* Make sure mask is unique in the system */
-		mask_p = ovs_sw_flow_mask_find(table, &mask);
+		mask_p = ovs_sw_flow_mask_find(&dp->table, &mask);
 		if (!mask_p) {
 			/* Allocate a new mask if none exsits. */
 			mask_p = ovs_sw_flow_mask_alloc();
@@ -860,7 +822,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 				goto err_flow_free;
 			mask_p->key = mask.key;
 			mask_p->range = mask.range;
-			ovs_sw_flow_mask_insert(table, mask_p);
+			ovs_sw_flow_mask_insert(&dp->table, mask_p);
 		}
 
 		ovs_sw_flow_mask_add_ref(mask_p);
@@ -868,7 +830,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		ovs_flow_tbl_insert(table, flow);
+		ovs_flow_tbl_insert(&dp->table, flow);
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
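All of the expand/rehash policy deleted above (along with REHASH_FLOW_INTERVAL from the first hunk) presumably now lives behind ovs_flow_tbl_insert(), so callers no longer manage table growth at all. A sketch of that assumption; the table_instance_* helpers, the n_buckets field, and the REHASH_INTERVAL name are hypothetical:

    /* Sketch: growth/rehash decisions move inside the insert path. */
    void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
    {
            struct table_instance *ti = ovsl_dereference(table->ti);
            struct table_instance *new_ti = NULL;

            table_instance_insert(ti, flow);        /* hypothetical bucket insert */
            table->count++;

            /* Expand table, if necessary, to make room (mirrors the
             * removed caller-side logic; thresholds are assumed). */
            if (table->count > ti->n_buckets)
                    new_ti = table_instance_expand(ti);
            else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                    new_ti = table_instance_rehash(ti, ti->n_buckets);

            if (new_ti && !IS_ERR(new_ti)) {
                    rcu_assign_pointer(table->ti, new_ti);
                    table_instance_destroy(ti, true);
                    table->last_rehash = jiffies;
            }
    }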
@@ -936,7 +898,6 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -957,8 +918,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_tbl_lookup(table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
@@ -986,7 +946,6 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct sw_flow *flow;
 	struct datapath *dp;
-	struct flow_table *table;
 	struct sw_flow_match match;
 	int err;
 
@@ -998,7 +957,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (!a[OVS_FLOW_ATTR_KEY]) {
-		err = flush_flows(dp);
+		err = ovs_flow_tbl_flush(&dp->table);
 		goto unlock;
 	}
 
@@ -1007,8 +966,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock;
 
-	table = ovsl_dereference(dp->table);
-	flow = ovs_flow_tbl_lookup(table, &key);
+	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
 		err = -ENOENT;
 		goto unlock;
@@ -1020,7 +978,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	ovs_flow_tbl_remove(table, flow);
+	ovs_flow_tbl_remove(&dp->table, flow);
 
 	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
 				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
@@ -1039,8 +997,8 @@ unlock:
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+	struct table_instance *ti;
 	struct datapath *dp;
-	struct flow_table *table;
 
 	rcu_read_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1049,14 +1007,14 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		return -ENODEV;
 	}
 
-	table = rcu_dereference(dp->table);
+	ti = rcu_dereference(dp->table.ti);
 	for (;;) {
 		struct sw_flow *flow;
 		u32 bucket, obj;
 
 		bucket = cb->args[0];
 		obj = cb->args[1];
-		flow = ovs_flow_tbl_dump_next(table, &bucket, &obj);
+		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
 		if (!flow)
 			break;
 
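Note that the dump path snapshots dp->table.ti once per netlink callback and iterates that instance directly: the bucket/obj cursors saved in cb->args[] only make sense against a single table_instance, which is presumably why ovs_flow_tbl_dump_next() now takes the instance rather than the table. A sketch of the assumed iteration; the n_buckets field and nth_flow_in_bucket() helper are hypothetical:

    struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                           u32 *bucket, u32 *last)
    {
            /* Walk buckets from the saved cursor. */
            while (*bucket < ti->n_buckets) {
                    struct sw_flow *flow;

                    /* hypothetical helper: nth flow in this bucket, or NULL */
                    flow = nth_flow_in_bucket(ti, *bucket, *last);
                    if (flow) {
                            (*last)++;
                            return flow;
                    }
                    (*bucket)++;
                    *last = 0;
            }
            return NULL;
    }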
@@ -1220,9 +1178,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
 	/* Allocate table. */
-	err = -ENOMEM;
-	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
-	if (!dp->table)
+	err = ovs_flow_tbl_init(&dp->table);
+	if (err)
 		goto err_free_dp;
 
 	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
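ovs_flow_tbl_init() replaces the open-coded alloc-and-assign and folds the -ENOMEM handling into a single call. A sketch under the same assumptions as the flush sketch above:

    /* Sketch of the assumed ovs_flow_tbl_init(). */
    int ovs_flow_tbl_init(struct flow_table *table)
    {
            struct table_instance *ti = table_instance_alloc(TBL_MIN_BUCKETS);

            if (!ti)
                    return -ENOMEM;

            rcu_assign_pointer(table->ti, ti);
            INIT_LIST_HEAD(&table->mask_list);      /* assumed mask list */
            table->last_rehash = jiffies;
            table->count = 0;
            return 0;
    }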
@@ -1279,7 +1236,7 @@ err_destroy_ports_array:
err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
err_destroy_table:
-	ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
+	ovs_flow_tbl_destroy(&dp->table, false);
err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
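One detail worth noting: this error path and destroy_dp_rcu() both pass deferred=false, while the flush path frees the old instance with deferred=true; presumably the flag still selects immediate versus RCU-deferred freeing, as in the removed flow_table code. A sketch under that assumption:

    void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
    {
            /* Raw dereference assumed: this runs both with ovs_mutex held
             * and from an RCU callback (destroy_dp_rcu above). */
            struct table_instance *ti = rcu_dereference_raw(table->ti);

            table_instance_destroy(ti, deferred);   /* hypothetical helper */
    }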