 include/linux/netdevice.h |  2 +-
 net/core/dev.c            | 25 +++++++++++++++----------
 net/ipv4/Kconfig          |  2 +-
 net/ipv4/ipmr.c           | 10 ++++++++--
 4 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 470f7c951afb..55c2086e1f06 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1331,7 +1331,7 @@ struct softnet_data {
 	struct sk_buff		*completion_queue;
 
 	/* Elements below can be accessed between CPUs for RPS */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 #endif
 	struct sk_buff_head	input_pkt_queue;
diff --git a/net/core/dev.c b/net/core/dev.c
index 876b1112d5ba..e8041eb76ac1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
  */
 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 {
@@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 	u8 ip_proto;
 	u32 addr1, addr2, ports, ihl;
 
-	rcu_read_lock();
-
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
@@ -2296,7 +2295,6 @@ got_hash:
 	}
 
 done:
-	rcu_read_unlock();
 	return cpu;
 }
 
@@ -2392,7 +2390,7 @@ enqueue:
 
 int netif_rx(struct sk_buff *skb)
 {
-	int cpu;
+	int ret;
 
 	/* if netpoll wants it, pretend we never saw it */
 	if (netpoll_rx(skb))
@@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb)
 		net_timestamp(skb);
 
 #ifdef CONFIG_RPS
-	cpu = get_rps_cpu(skb->dev, skb);
-	if (cpu < 0)
-		cpu = smp_processor_id();
+	{
+		int cpu;
+
+		rcu_read_lock();
+		cpu = get_rps_cpu(skb->dev, skb);
+		if (cpu < 0)
+			cpu = smp_processor_id();
+		ret = enqueue_to_backlog(skb, cpu);
+		rcu_read_unlock();
+	}
 #else
-	cpu = smp_processor_id();
+	ret = enqueue_to_backlog(skb, get_cpu());
+	put_cpu();
 #endif
-
-	return enqueue_to_backlog(skb, cpu);
+	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
 
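
The net/core/dev.c hunks move the RCU read-side critical section out of get_rps_cpu() and up into netif_rx(), so the CPU picked from the RPS map is still covered by the read lock while the skb is actually queued; the non-RPS branch switches to get_cpu()/put_cpu(), presumably so the CPU id stays stable across the enqueue. A minimal userspace sketch of that caller-locks pattern follows; read_lock_stub, lookup_target_cpu, current_cpu and enqueue_to_cpu are hypothetical stand-ins, not kernel APIs, and the stub locks are no-ops here.

/* Illustrative sketch of the reworked netif_rx() control flow (CONFIG_RPS
 * branch): the caller takes the read-side lock around both the steering
 * lookup and the enqueue, instead of the lookup helper locking internally. */
#include <stdio.h>

static void read_lock_stub(void)   { /* stands in for rcu_read_lock() */ }
static void read_unlock_stub(void) { /* stands in for rcu_read_unlock() */ }

static int current_cpu(void) { return 0; }                  /* smp_processor_id() stand-in */
static int lookup_target_cpu(int pkt) { return pkt % 2 ? 1 : -1; } /* get_rps_cpu() stand-in */

static int enqueue_to_cpu(int pkt, int cpu)
{
	printf("packet %d -> backlog of CPU %d\n", pkt, cpu);
	return 0; /* NET_RX_SUCCESS */
}

static int rx(int pkt)
{
	int ret, cpu;

	read_lock_stub();
	cpu = lookup_target_cpu(pkt);
	if (cpu < 0)                 /* no steering entry: stay on this CPU */
		cpu = current_cpu();
	ret = enqueue_to_cpu(pkt, cpu);
	read_unlock_stub();          /* lookup result is consumed before unlocking */
	return ret;
}

int main(void)
{
	for (int pkt = 0; pkt < 4; pkt++)
		rx(pkt);
	return 0;
}
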
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index be597749c385..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -252,7 +252,7 @@ config IP_MROUTE
 
 config IP_MROUTE_MULTIPLE_TABLES
 	bool "IP: multicast policy routing"
-	depends on IP_ADVANCED_ROUTER
+	depends on IP_MROUTE && IP_ADVANCED_ROUTER
 	select FIB_RULES
 	help
 	  Normally, a multicast router runs a userspace daemon and decides
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5df5fd74c6d1..7d8a2bcecb76 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -71,6 +71,9 @@
 
 struct mr_table {
 	struct list_head	list;
+#ifdef CONFIG_NET_NS
+	struct net		*net;
+#endif
 	u32			id;
 	struct sock		*mroute_sk;
 	struct timer_list	ipmr_expire_timer;
@@ -308,6 +311,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 	if (mrt == NULL)
 		return NULL;
+	write_pnet(&mrt->net, net);
 	mrt->id = id;
 
 	/* Forwarding cache */
@@ -580,7 +584,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
 
 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 {
-	struct net *net = NULL; //mrt->net;
+	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
@@ -1089,12 +1093,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 	 * Check to see if we resolved a queued list. If so we
 	 * need to send on the frames and tidy up.
 	 */
+	found = false;
 	spin_lock_bh(&mfc_unres_lock);
 	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
 		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			list_del(&uc->list);
 			atomic_dec(&mrt->cache_resolve_queue_len);
+			found = true;
 			break;
 		}
 	}
@@ -1102,7 +1108,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
-	if (uc) {
+	if (found) {
 		ipmr_cache_resolve(net, mrt, uc, c);
 		ipmr_cache_free(uc);
 	}
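
The last two ipmr.c hunks replace the old `if (uc)` test with an explicit `found` flag. After a list_for_each_entry() walk that matches nothing, the cursor variable is not NULL: it points at an address computed back from the list head, so the old check could fire on a bogus entry. A self-contained sketch of that pitfall follows; the list_head and container_of definitions below are simplified stand-ins for the kernel's, not the real implementations.

/* Demonstrates that after an (expanded) list_for_each_entry() over an empty
 * list, the cursor is non-NULL even though no entry matched, so an explicit
 * boolean is needed to record whether the search succeeded. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry {
	int key;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };   /* empty list */
	struct entry *cur = NULL;
	bool found = false;

	/* Expanded form of list_for_each_entry(): cur is rebound on every
	 * iteration, including the final step back onto the list head. */
	for (cur = container_of(head.next, struct entry, list);
	     &cur->list != &head;
	     cur = container_of(cur->list.next, struct entry, list)) {
		if (cur->key == 42) {
			found = true;
			break;
		}
	}

	/* cur is non-NULL here even though nothing matched. */
	printf("cur != NULL: %d, found: %d\n", cur != NULL, (int)found);
	return 0;
}
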