diff options
author | David S. Miller <davem@davemloft.net> | 2015-04-06 21:52:19 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-04-06 22:34:15 -0400 |
commit | c85d6975ef923cffdd56de3e0e6aba0977282cff (patch) | |
tree | cb497deea01827951809c9c7c0f1c22780c146be /net/core | |
parent | 60302ff631f0f3eac0ec592e128b776f0676b397 (diff) | |
parent | f22e6e847115abc3a0e2ad7bb18d243d42275af1 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/mellanox/mlx4/cmd.c
net/core/fib_rules.c
net/ipv4/fib_frontend.c
The fib_rules.c and fib_frontend.c conflicts were locking adjustments
in 'net' overlapping addition and removal of code in 'net-next'.
The mlx4 conflict was a bug fix in 'net' happening in the same
place a constant was being replaced with a more suitable macro.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c | 4 | ||||
-rw-r--r-- | net/core/fib_rules.c | 2 | ||||
-rw-r--r-- | net/core/net_namespace.c | 28 | ||||
-rw-r--r-- | net/core/sock.c | 19 |
4 files changed, 35 insertions(+), 18 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c index 26622d614f81..3b3965288f52 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2870,7 +2870,9 @@ static void skb_update_prio(struct sk_buff *skb) | |||
2870 | #define skb_update_prio(skb) | 2870 | #define skb_update_prio(skb) |
2871 | #endif | 2871 | #endif |
2872 | 2872 | ||
2873 | static DEFINE_PER_CPU(int, xmit_recursion); | 2873 | DEFINE_PER_CPU(int, xmit_recursion); |
2874 | EXPORT_SYMBOL(xmit_recursion); | ||
2875 | |||
2874 | #define RECURSION_LIMIT 10 | 2876 | #define RECURSION_LIMIT 10 |
2875 | 2877 | ||
2876 | /** | 2878 | /** |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 68ea6950cad1..9a12668f7d62 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -165,9 +165,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
165 | 165 | ||
166 | spin_lock(&net->rules_mod_lock); | 166 | spin_lock(&net->rules_mod_lock); |
167 | list_del_rcu(&ops->list); | 167 | list_del_rcu(&ops->list); |
168 | fib_rules_cleanup_ops(ops); | ||
169 | spin_unlock(&net->rules_mod_lock); | 168 | spin_unlock(&net->rules_mod_lock); |
170 | 169 | ||
170 | fib_rules_cleanup_ops(ops); | ||
171 | kfree_rcu(ops, rcu); | 171 | kfree_rcu(ops, rcu); |
172 | } | 172 | } |
173 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 173 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index ce6396a75b8b..e7345d9031df 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc) | |||
198 | */ | 198 | */ |
199 | int peernet2id(struct net *net, struct net *peer) | 199 | int peernet2id(struct net *net, struct net *peer) |
200 | { | 200 | { |
201 | int id = __peernet2id(net, peer, true); | 201 | bool alloc = atomic_read(&peer->count) == 0 ? false : true; |
202 | int id; | ||
202 | 203 | ||
204 | id = __peernet2id(net, peer, alloc); | ||
203 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; | 205 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; |
204 | } | 206 | } |
205 | EXPORT_SYMBOL(peernet2id); | 207 | EXPORT_SYMBOL(peernet2id); |
@@ -338,7 +340,7 @@ static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ | |||
338 | static void cleanup_net(struct work_struct *work) | 340 | static void cleanup_net(struct work_struct *work) |
339 | { | 341 | { |
340 | const struct pernet_operations *ops; | 342 | const struct pernet_operations *ops; |
341 | struct net *net, *tmp, *peer; | 343 | struct net *net, *tmp; |
342 | struct list_head net_kill_list; | 344 | struct list_head net_kill_list; |
343 | LIST_HEAD(net_exit_list); | 345 | LIST_HEAD(net_exit_list); |
344 | 346 | ||
@@ -354,6 +356,14 @@ static void cleanup_net(struct work_struct *work) | |||
354 | list_for_each_entry(net, &net_kill_list, cleanup_list) { | 356 | list_for_each_entry(net, &net_kill_list, cleanup_list) { |
355 | list_del_rcu(&net->list); | 357 | list_del_rcu(&net->list); |
356 | list_add_tail(&net->exit_list, &net_exit_list); | 358 | list_add_tail(&net->exit_list, &net_exit_list); |
359 | for_each_net(tmp) { | ||
360 | int id = __peernet2id(tmp, net, false); | ||
361 | |||
362 | if (id >= 0) | ||
363 | idr_remove(&tmp->netns_ids, id); | ||
364 | } | ||
365 | idr_destroy(&net->netns_ids); | ||
366 | |||
357 | } | 367 | } |
358 | rtnl_unlock(); | 368 | rtnl_unlock(); |
359 | 369 | ||
@@ -379,26 +389,12 @@ static void cleanup_net(struct work_struct *work) | |||
379 | */ | 389 | */ |
380 | rcu_barrier(); | 390 | rcu_barrier(); |
381 | 391 | ||
382 | rtnl_lock(); | ||
383 | /* Finally it is safe to free my network namespace structure */ | 392 | /* Finally it is safe to free my network namespace structure */ |
384 | list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { | 393 | list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { |
385 | /* Unreference net from all peers (no need to loop over | ||
386 | * net_exit_list because idr_destroy() will be called for each | ||
387 | * element of this list. | ||
388 | */ | ||
389 | for_each_net(peer) { | ||
390 | int id = __peernet2id(peer, net, false); | ||
391 | |||
392 | if (id >= 0) | ||
393 | idr_remove(&peer->netns_ids, id); | ||
394 | } | ||
395 | idr_destroy(&net->netns_ids); | ||
396 | |||
397 | list_del_init(&net->exit_list); | 394 | list_del_init(&net->exit_list); |
398 | put_user_ns(net->user_ns); | 395 | put_user_ns(net->user_ns); |
399 | net_drop_ns(net); | 396 | net_drop_ns(net); |
400 | } | 397 | } |
401 | rtnl_unlock(); | ||
402 | } | 398 | } |
403 | static DECLARE_WORK(net_cleanup_work, cleanup_net); | 399 | static DECLARE_WORK(net_cleanup_work, cleanup_net); |
404 | 400 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 119ae464b44a..654e38a99759 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | |||
653 | sock_reset_flag(sk, bit); | 653 | sock_reset_flag(sk, bit); |
654 | } | 654 | } |
655 | 655 | ||
656 | bool sk_mc_loop(struct sock *sk) | ||
657 | { | ||
658 | if (dev_recursion_level()) | ||
659 | return false; | ||
660 | if (!sk) | ||
661 | return true; | ||
662 | switch (sk->sk_family) { | ||
663 | case AF_INET: | ||
664 | return inet_sk(sk)->mc_loop; | ||
665 | #if IS_ENABLED(CONFIG_IPV6) | ||
666 | case AF_INET6: | ||
667 | return inet6_sk(sk)->mc_loop; | ||
668 | #endif | ||
669 | } | ||
670 | WARN_ON(1); | ||
671 | return true; | ||
672 | } | ||
673 | EXPORT_SYMBOL(sk_mc_loop); | ||
674 | |||
656 | /* | 675 | /* |
657 | * This is meant for all protocols to use and covers goings on | 676 | * This is meant for all protocols to use and covers goings on |
658 | * at the socket level. Everything here is generic. | 677 | * at the socket level. Everything here is generic. |