author     Pravin B Shelar <pshelar@nicira.com>   2012-02-22 22:58:59 -0500
committer  Jesse Gross <jesse@nicira.com>         2012-08-22 17:48:55 -0400
commit     46df7b814548849deee01f50bc75f8f5ae8cd767 (patch)
tree       1663fa6ae46edcba3787c0a4ff839365bb52bcbe /net/openvswitch
parent     0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff)
openvswitch: Add support for network namespaces.
This patch adds support for network namespaces to openvswitch.
Since devices must be released when a namespace is destroyed, a
side effect of this patch is that the module no longer keeps a
refcount but instead cleans up any remaining state when it is unloaded.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
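
The cleanup model behind this is the kernel's pernet_operations interface: the
list of datapaths moves from a module-global list into per-namespace storage,
and the per-netns exit hook (rather than a module refcount) tears down whatever
datapaths a dying namespace still owns. A condensed sketch of that pattern,
using the names introduced further down in this patch (genl locking and error
handling omitted):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Per net-namespace state, allocated by the netns core for every net. */
struct ovs_net {
	struct list_head dps;		/* datapaths owned by this namespace */
};

static int ovs_net_id __read_mostly;

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp, *dp_next;

	/* Namespace teardown releases every datapath and its ports/devices. */
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

/* Module init/exit then simply pair register_pernet_device(&ovs_net_ops)
 * with unregister_pernet_device(&ovs_net_ops). */
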
Diffstat (limited to 'net/openvswitch')
-rw-r--r--  net/openvswitch/datapath.c            | 269
-rw-r--r--  net/openvswitch/datapath.h            |  19
-rw-r--r--  net/openvswitch/dp_notify.c           |   8
-rw-r--r--  net/openvswitch/vport-internal_dev.c  |   7
-rw-r--r--  net/openvswitch/vport-netdev.c        |   2
-rw-r--r--  net/openvswitch/vport.c               |  22
-rw-r--r--  net/openvswitch/vport.h               |   3
7 files changed, 207 insertions(+), 123 deletions(-)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d8277d29e710..cad39fca75a9 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -49,12 +49,29 @@
 #include <linux/dmi.h>
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "datapath.h"
 #include "flow.h"
 #include "vport-internal_dev.h"
 
 /**
+ * struct ovs_net - Per net-namespace data for ovs.
+ * @dps: List of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+struct ovs_net {
+	struct list_head dps;
+};
+
+static int ovs_net_id __read_mostly;
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+/**
  * DOC: Locking:
  *
  * Writes to device state (add/remove datapath, port, set operations on vports,
@@ -71,29 +88,21 @@
  * each other.
  */
 
-/* Global list of datapaths to enable dumping them all out.
- * Protected by genl_mutex.
- */
-static LIST_HEAD(dps);
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
 			     const struct dp_upcall_info *);
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+static int queue_userspace_packet(struct net *, int dp_ifindex,
+				  struct sk_buff *,
 				  const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
-static struct datapath *get_dp(int dp_ifindex)
+static struct datapath *get_dp(struct net *net, int dp_ifindex)
 {
 	struct datapath *dp = NULL;
 	struct net_device *dev;
 
 	rcu_read_lock();
-	dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+	dev = dev_get_by_index_rcu(net, dp_ifindex);
 	if (dev) {
 		struct vport *vport = ovs_internal_dev_get_vport(dev);
 		if (vport)
@@ -135,6 +144,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 
 	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
 	free_percpu(dp->stats_percpu);
+	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
 }
 
@@ -220,11 +230,12 @@ static struct genl_family dp_packet_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_PACKET_FAMILY,
 	.version = OVS_PACKET_VERSION,
-	.maxattr = OVS_PACKET_ATTR_MAX
+	.maxattr = OVS_PACKET_ATTR_MAX,
+	.netnsok = true
 };
 
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 		  const struct dp_upcall_info *upcall_info)
 {
 	struct dp_stats_percpu *stats;
 	int dp_ifindex;
@@ -242,9 +253,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 	}
 
 	if (!skb_is_gso(skb))
-		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
 	else
-		err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
 	if (err)
 		goto err;
 
@@ -260,7 +271,8 @@ err:
 	return err;
 }
 
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+static int queue_gso_packets(struct net *net, int dp_ifindex,
+			     struct sk_buff *skb,
 			     const struct dp_upcall_info *upcall_info)
 {
 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -276,7 +288,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
 	/* Queue all of the segments. */
 	skb = segs;
 	do {
-		err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
 		if (err)
 			break;
 
@@ -306,7 +318,8 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
 	return err;
 }
 
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+static int queue_userspace_packet(struct net *net, int dp_ifindex,
+				  struct sk_buff *skb,
 				  const struct dp_upcall_info *upcall_info)
 {
 	struct ovs_header *upcall;
@@ -362,7 +375,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
 
 	skb_copy_and_csum_dev(skb, nla_data(nla));
 
-	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+	err = genlmsg_unicast(net, user_skb, upcall_info->pid);
 
 out:
 	kfree_skb(nskb);
@@ -370,15 +383,10 @@ out:
 }
 
 /* Called with genl_mutex. */
-static int flush_flows(int dp_ifindex)
+static int flush_flows(struct datapath *dp)
 {
 	struct flow_table *old_table;
 	struct flow_table *new_table;
-	struct datapath *dp;
-
-	dp = get_dp(dp_ifindex);
-	if (!dp)
-		return -ENODEV;
 
 	old_table = genl_dereference(dp->table);
 	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
@@ -668,7 +676,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	packet->priority = flow->key.phy.priority;
 
 	rcu_read_lock();
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
 		goto err_unlock;
@@ -742,7 +750,8 @@ static struct genl_family dp_flow_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_FLOW_FAMILY,
 	.version = OVS_FLOW_VERSION,
-	.maxattr = OVS_FLOW_ATTR_MAX
+	.maxattr = OVS_FLOW_ATTR_MAX,
+	.netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
@@ -894,7 +903,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto error;
 	}
 
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	error = -ENODEV;
 	if (!dp)
 		goto error;
@@ -995,7 +1004,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			   ovs_dp_flow_multicast_group.id, info->nlhdr,
 			   GFP_KERNEL);
 	else
-		netlink_set_err(init_net.genl_sock, 0,
+		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
 				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
 	return 0;
 
@@ -1023,7 +1032,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		return err;
 
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp)
 		return -ENODEV;
 
@@ -1052,16 +1061,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	int err;
 	int key_len;
 
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	if (!dp)
+		return -ENODEV;
+
 	if (!a[OVS_FLOW_ATTR_KEY])
-		return flush_flows(ovs_header->dp_ifindex);
+		return flush_flows(dp);
+
 	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
 	if (err)
 		return err;
 
-	dp = get_dp(ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
-
 	table = genl_dereference(dp->table);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 	if (!flow)
@@ -1090,7 +1100,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct datapath *dp;
 	struct flow_table *table;
 
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp)
 		return -ENODEV;
 
@@ -1152,7 +1162,8 @@ static struct genl_family dp_datapath_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_DATAPATH_FAMILY,
 	.version = OVS_DATAPATH_VERSION,
-	.maxattr = OVS_DP_ATTR_MAX
+	.maxattr = OVS_DP_ATTR_MAX,
+	.netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
@@ -1210,18 +1221,19 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
 }
 
 /* Called with genl_mutex and optionally with RTNL lock also. */
-static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+static struct datapath *lookup_datapath(struct net *net,
+					struct ovs_header *ovs_header,
 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
 {
 	struct datapath *dp;
 
 	if (!a[OVS_DP_ATTR_NAME])
-		dp = get_dp(ovs_header->dp_ifindex);
+		dp = get_dp(net, ovs_header->dp_ifindex);
 	else {
 		struct vport *vport;
 
 		rcu_read_lock();
-		vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
 		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
 		rcu_read_unlock();
 	}
@@ -1235,6 +1247,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct datapath *dp;
 	struct vport *vport;
+	struct ovs_net *ovs_net;
 	int err;
 
 	err = -EINVAL;
@@ -1242,15 +1255,14 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err;
 
 	rtnl_lock();
-	err = -ENODEV;
-	if (!try_module_get(THIS_MODULE))
-		goto err_unlock_rtnl;
 
 	err = -ENOMEM;
 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
 	if (dp == NULL)
-		goto err_put_module;
+		goto err_unlock_rtnl;
+
 	INIT_LIST_HEAD(&dp->port_list);
+	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
 	/* Allocate table. */
 	err = -ENOMEM;
@@ -1287,7 +1299,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(reply))
 		goto err_destroy_local_port;
 
-	list_add_tail(&dp->list_node, &dps);
+	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
+	list_add_tail(&dp->list_node, &ovs_net->dps);
 	rtnl_unlock();
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
@@ -1302,34 +1315,20 @@ err_destroy_percpu:
 err_destroy_table:
 	ovs_flow_tbl_destroy(genl_dereference(dp->table));
 err_free_dp:
+	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
-err_put_module:
-	module_put(THIS_MODULE);
 err_unlock_rtnl:
 	rtnl_unlock();
 err:
 	return err;
 }
 
-static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+/* Called with genl_mutex. */
+static void __dp_destroy(struct datapath *dp)
 {
 	struct vport *vport, *next_vport;
-	struct sk_buff *reply;
-	struct datapath *dp;
-	int err;
 
 	rtnl_lock();
-	dp = lookup_datapath(info->userhdr, info->attrs);
-	err = PTR_ERR(dp);
-	if (IS_ERR(dp))
-		goto exit_unlock;
-
-	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
-				      info->snd_seq, OVS_DP_CMD_DEL);
-	err = PTR_ERR(reply);
-	if (IS_ERR(reply))
-		goto exit_unlock;
-
 	list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
 		if (vport->port_no != OVSP_LOCAL)
 			ovs_dp_detach_port(vport);
@@ -1345,17 +1344,32 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	rtnl_unlock();
 
 	call_rcu(&dp->rcu, destroy_dp_rcu);
-	module_put(THIS_MODULE);
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *reply;
+	struct datapath *dp;
+	int err;
+
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+	err = PTR_ERR(dp);
+	if (IS_ERR(dp))
+		return err;
+
+	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+				      info->snd_seq, OVS_DP_CMD_DEL);
+	err = PTR_ERR(reply);
+	if (IS_ERR(reply))
+		return err;
+
+	__dp_destroy(dp);
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
 		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
 		    GFP_KERNEL);
 
 	return 0;
-
-exit_unlock:
-	rtnl_unlock();
-	return err;
 }
 
 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
@@ -1364,7 +1378,7 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	int err;
 
-	dp = lookup_datapath(info->userhdr, info->attrs);
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	if (IS_ERR(dp))
 		return PTR_ERR(dp);
 
@@ -1372,7 +1386,7 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 				  info->snd_seq, OVS_DP_CMD_NEW);
 	if (IS_ERR(reply)) {
 		err = PTR_ERR(reply);
-		netlink_set_err(init_net.genl_sock, 0,
+		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
 				ovs_dp_datapath_multicast_group.id, err);
 		return 0;
 	}
@@ -1389,7 +1403,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct datapath *dp;
 
-	dp = lookup_datapath(info->userhdr, info->attrs);
+	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	if (IS_ERR(dp))
 		return PTR_ERR(dp);
 
@@ -1403,11 +1417,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 
 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
 	struct datapath *dp;
 	int skip = cb->args[0];
 	int i = 0;
 
-	list_for_each_entry(dp, &dps, list_node) {
+	list_for_each_entry(dp, &ovs_net->dps, list_node) {
 		if (i >= skip &&
 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1459,7 +1474,8 @@ static struct genl_family dp_vport_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_VPORT_FAMILY,
 	.version = OVS_VPORT_VERSION,
-	.maxattr = OVS_VPORT_ATTR_MAX
+	.maxattr = OVS_VPORT_ATTR_MAX,
+	.netnsok = true
 };
 
 struct genl_multicast_group ovs_dp_vport_multicast_group = {
@@ -1525,14 +1541,15 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
 }
 
 /* Called with RTNL lock or RCU read lock. */
-static struct vport *lookup_vport(struct ovs_header *ovs_header,
+static struct vport *lookup_vport(struct net *net,
+				  struct ovs_header *ovs_header,
 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
 {
 	struct datapath *dp;
 	struct vport *vport;
 
 	if (a[OVS_VPORT_ATTR_NAME]) {
-		vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
 		if (!vport)
 			return ERR_PTR(-ENODEV);
 		if (ovs_header->dp_ifindex &&
@@ -1545,7 +1562,7 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
 		if (port_no >= DP_MAX_PORTS)
 			return ERR_PTR(-EFBIG);
 
-		dp = get_dp(ovs_header->dp_ifindex);
+		dp = get_dp(net, ovs_header->dp_ifindex);
 		if (!dp)
 			return ERR_PTR(-ENODEV);
 
@@ -1574,7 +1591,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto exit;
 
 	rtnl_lock();
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
 		goto exit_unlock;
@@ -1638,7 +1655,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rtnl_lock();
-	vport = lookup_vport(info->userhdr, a);
+	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
 		goto exit_unlock;
@@ -1658,7 +1675,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
-		netlink_set_err(init_net.genl_sock, 0,
+		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
 				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
 		goto exit_unlock;
 	}
@@ -1679,7 +1696,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rtnl_lock();
-	vport = lookup_vport(info->userhdr, a);
+	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
 		goto exit_unlock;
@@ -1714,7 +1731,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rcu_read_lock();
-	vport = lookup_vport(ovs_header, a);
+	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
 		goto exit_unlock;
@@ -1741,7 +1758,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	u32 port_no;
 	int retval;
 
-	dp = get_dp(ovs_header->dp_ifindex);
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp)
 		return -ENODEV;
 
@@ -1766,28 +1783,6 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	return retval;
 }
 
-static void rehash_flow_table(struct work_struct *work)
-{
-	struct datapath *dp;
-
-	genl_lock();
-
-	list_for_each_entry(dp, &dps, list_node) {
-		struct flow_table *old_table = genl_dereference(dp->table);
-		struct flow_table *new_table;
-
-		new_table = ovs_flow_tbl_rehash(old_table);
-		if (!IS_ERR(new_table)) {
-			rcu_assign_pointer(dp->table, new_table);
-			ovs_flow_tbl_deferred_destroy(old_table);
-		}
-	}
-
-	genl_unlock();
-
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
-}
-
 static struct genl_ops dp_vport_genl_ops[] = {
 	{ .cmd = OVS_VPORT_CMD_NEW,
 	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1872,6 +1867,59 @@ error:
 	return err;
 }
 
+static void rehash_flow_table(struct work_struct *work)
+{
+	struct datapath *dp;
+	struct net *net;
+
+	genl_lock();
+	rtnl_lock();
+	for_each_net(net) {
+		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+		list_for_each_entry(dp, &ovs_net->dps, list_node) {
+			struct flow_table *old_table = genl_dereference(dp->table);
+			struct flow_table *new_table;
+
+			new_table = ovs_flow_tbl_rehash(old_table);
+			if (!IS_ERR(new_table)) {
+				rcu_assign_pointer(dp->table, new_table);
+				ovs_flow_tbl_deferred_destroy(old_table);
+			}
+		}
+	}
+	rtnl_unlock();
+	genl_unlock();
+
+	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+static int __net_init ovs_init_net(struct net *net)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+	INIT_LIST_HEAD(&ovs_net->dps);
+	return 0;
+}
+
+static void __net_exit ovs_exit_net(struct net *net)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+	struct datapath *dp, *dp_next;
+
+	genl_lock();
+	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+		__dp_destroy(dp);
+	genl_unlock();
+}
+
+static struct pernet_operations ovs_net_ops = {
+	.init = ovs_init_net,
+	.exit = ovs_exit_net,
+	.id = &ovs_net_id,
+	.size = sizeof(struct ovs_net),
+};
+
 static int __init dp_init(void)
 {
 	struct sk_buff *dummy_skb;
@@ -1889,10 +1937,14 @@ static int __init dp_init(void)
 	if (err)
 		goto error_flow_exit;
 
-	err = register_netdevice_notifier(&ovs_dp_device_notifier);
+	err = register_pernet_device(&ovs_net_ops);
 	if (err)
 		goto error_vport_exit;
 
+	err = register_netdevice_notifier(&ovs_dp_device_notifier);
+	if (err)
+		goto error_netns_exit;
+
 	err = dp_register_genl();
 	if (err < 0)
 		goto error_unreg_notifier;
@@ -1903,6 +1955,8 @@ static int __init dp_init(void)
 
 error_unreg_notifier:
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_netns_exit:
+	unregister_pernet_device(&ovs_net_ops);
 error_vport_exit:
 	ovs_vport_exit();
 error_flow_exit:
@@ -1914,9 +1968,10 @@ error:
 static void dp_cleanup(void)
 {
 	cancel_delayed_work_sync(&rehash_flow_wq);
-	rcu_barrier();
 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
+	unregister_pernet_device(&ovs_net_ops);
+	rcu_barrier();
 	ovs_vport_exit();
 	ovs_flow_exit();
 }
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c1105c147531..771c11e13e34 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -27,8 +27,7 @@
 #include <linux/u64_stats_sync.h>
 
 #include "flow.h"
-
-struct vport;
+#include "vport.h"
 
 #define DP_MAX_PORTS 1024
 #define SAMPLE_ACTION_DEPTH 3
@@ -63,6 +62,7 @@ struct dp_stats_percpu {
  * @port_list: List of all ports in @ports in arbitrary order. RTNL required
  * to iterate or modify.
  * @stats_percpu: Per-CPU datapath statistics.
+ * @net: Reference to net namespace.
  *
  * Context: See the comment on locking at the top of datapath.c for additional
  * locking information.
@@ -80,6 +80,11 @@ struct datapath {
 
 	/* Stats. */
 	struct dp_stats_percpu __percpu *stats_percpu;
+
+#ifdef CONFIG_NET_NS
+	/* Network namespace ref. */
+	struct net *net;
+#endif
 };
 
 /**
@@ -108,6 +113,16 @@ struct dp_upcall_info {
 	u32 pid;
 };
 
+static inline struct net *ovs_dp_get_net(struct datapath *dp)
+{
+	return read_pnet(&dp->net);
+}
+
+static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
+{
+	write_pnet(&dp->net, net);
+}
+
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 36dcee8fc84a..5558350e0d33 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -41,19 +41,21 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_UNREGISTER:
 		if (!ovs_is_internal_dev(dev)) {
 			struct sk_buff *notify;
+			struct datapath *dp = vport->dp;
 
 			notify = ovs_vport_cmd_build_info(vport, 0, 0,
 							  OVS_VPORT_CMD_DEL);
 			ovs_dp_detach_port(vport);
 			if (IS_ERR(notify)) {
-				netlink_set_err(init_net.genl_sock, 0,
+				netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
 						ovs_dp_vport_multicast_group.id,
 						PTR_ERR(notify));
 				break;
 			}
 
-			genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
-					  GFP_KERNEL);
+			genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
+						ovs_dp_vport_multicast_group.id,
+						GFP_KERNEL);
 		}
 		break;
 	}
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 4061b9ee07f7..5d460c37df07 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -144,7 +144,7 @@ static void do_setup(struct net_device *netdev)
 	netdev->tx_queue_len = 0;
 
 	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-				NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
 
 	netdev->vlan_features = netdev->features;
 	netdev->features |= NETIF_F_HW_VLAN_TX;
@@ -175,9 +175,14 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		goto error_free_vport;
 	}
 
+	dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
 	internal_dev = internal_dev_priv(netdev_vport->dev);
 	internal_dev->vport = vport;
 
+	/* Restrict bridge port to current netns. */
+	if (vport->port_no == OVSP_LOCAL)
+		netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+
 	err = register_netdevice(netdev_vport->dev);
 	if (err)
 		goto error_free_netdev;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 6ea3551cc78c..3c1e58ba714b 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -83,7 +83,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
 
 	netdev_vport = netdev_vport_priv(vport);
 
-	netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
 	if (!netdev_vport->dev) {
 		err = -ENODEV;
 		goto error_free_vport;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6140336e79d7..9873acea9785 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -16,10 +16,10 @@
  * 02110-1301, USA
  */
 
-#include <linux/dcache.h>
 #include <linux/etherdevice.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -27,7 +27,9 @@
 #include <linux/rcupdate.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
+#include <net/net_namespace.h>
 
+#include "datapath.h"
 #include "vport.h"
 #include "vport-internal_dev.h"
 
@@ -67,9 +69,9 @@ void ovs_vport_exit(void)
 	kfree(dev_table);
 }
 
-static struct hlist_head *hash_bucket(const char *name)
+static struct hlist_head *hash_bucket(struct net *net, const char *name)
 {
-	unsigned int hash = full_name_hash(name, strlen(name));
+	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
 	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
@@ -80,14 +82,15 @@ static struct hlist_head *hash_bucket(const char *name)
  *
  * Must be called with RTNL or RCU read lock.
  */
-struct vport *ovs_vport_locate(const char *name)
+struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
-	struct hlist_head *bucket = hash_bucket(name);
+	struct hlist_head *bucket = hash_bucket(net, name);
 	struct vport *vport;
 	struct hlist_node *node;
 
 	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
-		if (!strcmp(name, vport->ops->get_name(vport)))
+		if (!strcmp(name, vport->ops->get_name(vport)) &&
+		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
 
 	return NULL;
@@ -170,14 +173,17 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
 
 	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
 		if (vport_ops_list[i]->type == parms->type) {
+			struct hlist_head *bucket;
+
 			vport = vport_ops_list[i]->create(parms);
 			if (IS_ERR(vport)) {
 				err = PTR_ERR(vport);
 				goto out;
 			}
 
-			hlist_add_head_rcu(&vport->hash_node,
-					   hash_bucket(vport->ops->get_name(vport)));
+			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+					     vport->ops->get_name(vport));
+			hlist_add_head_rcu(&vport->hash_node, bucket);
 			return vport;
 		}
 	}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index aac680ca2b06..97cef08d981b 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -20,6 +20,7 @@
 #define VPORT_H 1
 
 #include <linux/list.h>
+#include <linux/netlink.h>
 #include <linux/openvswitch.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
@@ -38,7 +39,7 @@ void ovs_vport_exit(void);
 struct vport *ovs_vport_add(const struct vport_parms *);
 void ovs_vport_del(struct vport *);
 
-struct vport *ovs_vport_locate(const char *name);
+struct vport *ovs_vport_locate(struct net *net, const char *name);
 
 void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
 