path: root/net/ipv4/route.c
author	Eric Dumazet <edumazet@google.com>	2015-01-14 18:17:06 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-15 18:26:16 -0500
commit	5055c371bfd53fd369b895051b541318c2bad495 (patch)
tree	28748ee77408e82a64ae13dbb4bd65f1eff6aa26 /net/ipv4/route.c
parent	0799c2d6f42db2b275b6479035f5b7a30ef4ee39 (diff)
ipv4: per cpu uncached list
RAW sockets with hdrinc suffer from contention on the rt_uncached_lock
spinlock.

One solution is to use percpu lists, since most routes are destroyed
by the cpu that created them.

It is unclear why we even have to put these routes in uncached_list,
as all outgoing packets should be freed when a device is dismantled.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: caacf05e5ad1 ("ipv4: Properly purge netdev references on uncached routes.")
Signed-off-by: David S. Miller <davem@davemloft.net>
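The idea generalizes beyond the kernel: shard one contended lock+list pair into
per-CPU lock+list pairs, record in each element which shard it joined, and have
any global walk visit every shard. Below is a minimal userspace sketch of that
pattern, not the kernel code: pthread spinlocks stand in for spinlock_t,
sched_getcpu() for raw_cpu_ptr(), and the names NR_BUCKETS, struct obj and
owner are invented for illustration.

/*
 * Minimal userspace sketch of the per-cpu list pattern in this commit.
 * Assumptions: pthread spinlocks stand in for spinlock_t, sched_getcpu()
 * for raw_cpu_ptr(), NR_BUCKETS for the number of possible CPUs; 'struct
 * obj' and 'owner' are illustrative names, not the kernel's.
 * Build with: cc -O2 sketch.c -pthread
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

#define NR_BUCKETS 64

struct list_node { struct list_node *prev, *next; };

struct uncached_list {                 /* one lock + list head per "cpu" */
        pthread_spinlock_t lock;
        struct list_node head;
};

static struct uncached_list buckets[NR_BUCKETS];

struct obj {                           /* stand-in for struct rtable */
        struct list_node node;
        struct uncached_list *owner;   /* bucket this object joined */
};

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

static void list_del(struct list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

void buckets_init(void)                /* mirrors the loop added to ip_rt_init() */
{
        for (int i = 0; i < NR_BUCKETS; i++) {
                pthread_spin_init(&buckets[i].lock, PTHREAD_PROCESS_PRIVATE);
                list_init(&buckets[i].head);
        }
}

void obj_add(struct obj *o)            /* like rt_add_uncached_list() */
{
        int cpu = sched_getcpu();
        struct uncached_list *ul = &buckets[(cpu < 0 ? 0 : cpu) % NR_BUCKETS];

        o->owner = ul;                 /* like rt->rt_uncached_list = ul */
        pthread_spin_lock(&ul->lock);
        list_add_tail(&o->node, &ul->head);
        pthread_spin_unlock(&ul->lock);
}

void obj_del(struct obj *o)            /* like the ipv4_dst_destroy() path */
{
        struct uncached_list *ul = o->owner;

        pthread_spin_lock(&ul->lock);  /* usually the local, uncontended lock */
        list_del(&o->node);
        pthread_spin_unlock(&ul->lock);
}

Because most objects are freed by the CPU that created them (the commit's
observation about routes), obj_del() usually takes a lock private to that CPU,
so the cross-CPU cacheline bouncing of a single global spinlock disappears.
The back-pointer still makes deletion correct when destruction happens on
another CPU, and the cost of sharding is that a flush, like rt_flush_dev() in
the diff below, must now walk every per-cpu list.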
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	46
1 file changed, 33 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..ce112d0f2698 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1325,14 +1325,22 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	return ret;
 }
 
-static DEFINE_SPINLOCK(rt_uncached_lock);
-static LIST_HEAD(rt_uncached_list);
+struct uncached_list {
+	spinlock_t		lock;
+	struct list_head	head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
 static void rt_add_uncached_list(struct rtable *rt)
 {
-	spin_lock_bh(&rt_uncached_lock);
-	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
-	spin_unlock_bh(&rt_uncached_lock);
+	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
+
+	rt->rt_uncached_list = ul;
+
+	spin_lock_bh(&ul->lock);
+	list_add_tail(&rt->rt_uncached, &ul->head);
+	spin_unlock_bh(&ul->lock);
 }
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1340,27 +1348,32 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 	struct rtable *rt = (struct rtable *) dst;
 
 	if (!list_empty(&rt->rt_uncached)) {
-		spin_lock_bh(&rt_uncached_lock);
+		struct uncached_list *ul = rt->rt_uncached_list;
+
+		spin_lock_bh(&ul->lock);
 		list_del(&rt->rt_uncached);
-		spin_unlock_bh(&rt_uncached_lock);
+		spin_unlock_bh(&ul->lock);
 	}
 }
 
 void rt_flush_dev(struct net_device *dev)
 {
-	if (!list_empty(&rt_uncached_list)) {
-		struct net *net = dev_net(dev);
-		struct rtable *rt;
+	struct net *net = dev_net(dev);
+	struct rtable *rt;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
-		spin_lock_bh(&rt_uncached_lock);
-		list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+		spin_lock_bh(&ul->lock);
+		list_for_each_entry(rt, &ul->head, rt_uncached) {
 			if (rt->dst.dev != dev)
 				continue;
 			rt->dst.dev = net->loopback_dev;
 			dev_hold(rt->dst.dev);
 			dev_put(dev);
 		}
-		spin_unlock_bh(&rt_uncached_lock);
+		spin_unlock_bh(&ul->lock);
 	}
 }
 
@@ -2717,6 +2730,7 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
 int __init ip_rt_init(void)
 {
 	int rc = 0;
+	int cpu;
 
 	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
 	if (!ip_idents)
@@ -2724,6 +2738,12 @@ int __init ip_rt_init(void)
 
 	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
 
+	for_each_possible_cpu(cpu) {
+		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
+
+		INIT_LIST_HEAD(&ul->head);
+		spin_lock_init(&ul->lock);
+	}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
 	if (!ip_rt_acct)