about summary refs log tree commit diff stats
path: root/net/ipv4/ipmr.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-10-01 12:15:08 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-04 00:50:53 -0400
commita8c9486b816f74d4645144db9e8fa2f711c1fc4b (patch)
tree950214451e4bb2172cd9beb8f27a415664f77909 /net/ipv4/ipmr.c
parent4c9687098f245601e9d94178715ee03afbcc6f80 (diff)
ipmr: RCU protection for mfc_cache_array
Use RCU & RTNL protection for mfc_cache_array[]. ipmr_cache_find() is called under rcu_read_lock(). Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ipmr.c')
-rw-r--r--net/ipv4/ipmr.c87
1 file changed, 47 insertions, 40 deletions
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index e2db2ea616ff..cbb6dabe024f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -577,11 +577,18 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
577 return 0; 577 return 0;
578} 578}
579 579
580static inline void ipmr_cache_free(struct mfc_cache *c) 580static void ipmr_cache_free_rcu(struct rcu_head *head)
581{ 581{
582 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
583
582 kmem_cache_free(mrt_cachep, c); 584 kmem_cache_free(mrt_cachep, c);
583} 585}
584 586
587static inline void ipmr_cache_free(struct mfc_cache *c)
588{
589 call_rcu(&c->rcu, ipmr_cache_free_rcu);
590}
591
585/* Destroy an unresolved cache entry, killing queued skbs 592/* Destroy an unresolved cache entry, killing queued skbs
586 and reporting error to netlink readers. 593 and reporting error to netlink readers.
587 */ 594 */
@@ -781,6 +788,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
781 return 0; 788 return 0;
782} 789}
783 790
791/* called with rcu_read_lock() */
784static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, 792static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
785 __be32 origin, 793 __be32 origin,
786 __be32 mcastgrp) 794 __be32 mcastgrp)
@@ -788,7 +796,7 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
788 int line = MFC_HASH(mcastgrp, origin); 796 int line = MFC_HASH(mcastgrp, origin);
789 struct mfc_cache *c; 797 struct mfc_cache *c;
790 798
791 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { 799 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
792 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) 800 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
793 return c; 801 return c;
794 } 802 }
@@ -801,19 +809,20 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
801static struct mfc_cache *ipmr_cache_alloc(void) 809static struct mfc_cache *ipmr_cache_alloc(void)
802{ 810{
803 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 811 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
804 if (c == NULL) 812
805 return NULL; 813 if (c)
806 c->mfc_un.res.minvif = MAXVIFS; 814 c->mfc_un.res.minvif = MAXVIFS;
807 return c; 815 return c;
808} 816}
809 817
810static struct mfc_cache *ipmr_cache_alloc_unres(void) 818static struct mfc_cache *ipmr_cache_alloc_unres(void)
811{ 819{
812 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 820 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
813 if (c == NULL) 821
814 return NULL; 822 if (c) {
815 skb_queue_head_init(&c->mfc_un.unres.unresolved); 823 skb_queue_head_init(&c->mfc_un.unres.unresolved);
816 c->mfc_un.unres.expires = jiffies + 10*HZ; 824 c->mfc_un.unres.expires = jiffies + 10*HZ;
825 }
817 return c; 826 return c;
818} 827}
819 828
@@ -1040,9 +1049,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
1040 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { 1049 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
1041 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1050 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
1042 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1051 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1043 write_lock_bh(&mrt_lock); 1052 list_del_rcu(&c->list);
1044 list_del(&c->list);
1045 write_unlock_bh(&mrt_lock);
1046 1053
1047 ipmr_cache_free(c); 1054 ipmr_cache_free(c);
1048 return 0; 1055 return 0;
@@ -1095,9 +1102,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1095 if (!mrtsock) 1102 if (!mrtsock)
1096 c->mfc_flags |= MFC_STATIC; 1103 c->mfc_flags |= MFC_STATIC;
1097 1104
1098 write_lock_bh(&mrt_lock); 1105 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
1099 list_add(&c->list, &mrt->mfc_cache_array[line]);
1100 write_unlock_bh(&mrt_lock);
1101 1106
1102 /* 1107 /*
1103 * Check to see if we resolved a queued list. If so we 1108 * Check to see if we resolved a queued list. If so we
@@ -1149,12 +1154,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
1149 */ 1154 */
1150 for (i = 0; i < MFC_LINES; i++) { 1155 for (i = 0; i < MFC_LINES; i++) {
1151 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1156 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1152 if (c->mfc_flags&MFC_STATIC) 1157 if (c->mfc_flags & MFC_STATIC)
1153 continue; 1158 continue;
1154 write_lock_bh(&mrt_lock); 1159 list_del_rcu(&c->list);
1155 list_del(&c->list);
1156 write_unlock_bh(&mrt_lock);
1157
1158 ipmr_cache_free(c); 1160 ipmr_cache_free(c);
1159 } 1161 }
1160 } 1162 }
@@ -1422,19 +1424,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1422 if (copy_from_user(&sr, arg, sizeof(sr))) 1424 if (copy_from_user(&sr, arg, sizeof(sr)))
1423 return -EFAULT; 1425 return -EFAULT;
1424 1426
1425 read_lock(&mrt_lock); 1427 rcu_read_lock();
1426 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); 1428 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1427 if (c) { 1429 if (c) {
1428 sr.pktcnt = c->mfc_un.res.pkt; 1430 sr.pktcnt = c->mfc_un.res.pkt;
1429 sr.bytecnt = c->mfc_un.res.bytes; 1431 sr.bytecnt = c->mfc_un.res.bytes;
1430 sr.wrong_if = c->mfc_un.res.wrong_if; 1432 sr.wrong_if = c->mfc_un.res.wrong_if;
1431 read_unlock(&mrt_lock); 1433 rcu_read_unlock();
1432 1434
1433 if (copy_to_user(arg, &sr, sizeof(sr))) 1435 if (copy_to_user(arg, &sr, sizeof(sr)))
1434 return -EFAULT; 1436 return -EFAULT;
1435 return 0; 1437 return 0;
1436 } 1438 }
1437 read_unlock(&mrt_lock); 1439 rcu_read_unlock();
1438 return -EADDRNOTAVAIL; 1440 return -EADDRNOTAVAIL;
1439 default: 1441 default:
1440 return -ENOIOCTLCMD; 1442 return -ENOIOCTLCMD;
@@ -1764,7 +1766,7 @@ int ip_mr_input(struct sk_buff *skb)
1764 } 1766 }
1765 } 1767 }
1766 1768
1767 read_lock(&mrt_lock); 1769 /* already under rcu_read_lock() */
1768 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 1770 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1769 1771
1770 /* 1772 /*
@@ -1776,13 +1778,12 @@ int ip_mr_input(struct sk_buff *skb)
1776 if (local) { 1778 if (local) {
1777 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 1779 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1778 ip_local_deliver(skb); 1780 ip_local_deliver(skb);
1779 if (skb2 == NULL) { 1781 if (skb2 == NULL)
1780 read_unlock(&mrt_lock);
1781 return -ENOBUFS; 1782 return -ENOBUFS;
1782 }
1783 skb = skb2; 1783 skb = skb2;
1784 } 1784 }
1785 1785
1786 read_lock(&mrt_lock);
1786 vif = ipmr_find_vif(mrt, skb->dev); 1787 vif = ipmr_find_vif(mrt, skb->dev);
1787 if (vif >= 0) { 1788 if (vif >= 0) {
1788 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 1789 int err2 = ipmr_cache_unresolved(mrt, vif, skb);
@@ -1795,8 +1796,8 @@ int ip_mr_input(struct sk_buff *skb)
1795 return -ENODEV; 1796 return -ENODEV;
1796 } 1797 }
1797 1798
1799 read_lock(&mrt_lock);
1798 ip_mr_forward(net, mrt, skb, cache, local); 1800 ip_mr_forward(net, mrt, skb, cache, local);
1799
1800 read_unlock(&mrt_lock); 1801 read_unlock(&mrt_lock);
1801 1802
1802 if (local) 1803 if (local)
@@ -1963,7 +1964,7 @@ int ipmr_get_route(struct net *net,
1963 if (mrt == NULL) 1964 if (mrt == NULL)
1964 return -ENOENT; 1965 return -ENOENT;
1965 1966
1966 read_lock(&mrt_lock); 1967 rcu_read_lock();
1967 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); 1968 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
1968 1969
1969 if (cache == NULL) { 1970 if (cache == NULL) {
@@ -1973,18 +1974,21 @@ int ipmr_get_route(struct net *net,
1973 int vif; 1974 int vif;
1974 1975
1975 if (nowait) { 1976 if (nowait) {
1976 read_unlock(&mrt_lock); 1977 rcu_read_unlock();
1977 return -EAGAIN; 1978 return -EAGAIN;
1978 } 1979 }
1979 1980
1980 dev = skb->dev; 1981 dev = skb->dev;
1982 read_lock(&mrt_lock);
1981 if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) { 1983 if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
1982 read_unlock(&mrt_lock); 1984 read_unlock(&mrt_lock);
1985 rcu_read_unlock();
1983 return -ENODEV; 1986 return -ENODEV;
1984 } 1987 }
1985 skb2 = skb_clone(skb, GFP_ATOMIC); 1988 skb2 = skb_clone(skb, GFP_ATOMIC);
1986 if (!skb2) { 1989 if (!skb2) {
1987 read_unlock(&mrt_lock); 1990 read_unlock(&mrt_lock);
1991 rcu_read_unlock();
1988 return -ENOMEM; 1992 return -ENOMEM;
1989 } 1993 }
1990 1994
@@ -1997,13 +2001,16 @@ int ipmr_get_route(struct net *net,
1997 iph->version = 0; 2001 iph->version = 0;
1998 err = ipmr_cache_unresolved(mrt, vif, skb2); 2002 err = ipmr_cache_unresolved(mrt, vif, skb2);
1999 read_unlock(&mrt_lock); 2003 read_unlock(&mrt_lock);
2004 rcu_read_unlock();
2000 return err; 2005 return err;
2001 } 2006 }
2002 2007
2003 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) 2008 read_lock(&mrt_lock);
2009 if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
2004 cache->mfc_flags |= MFC_NOTIFY; 2010 cache->mfc_flags |= MFC_NOTIFY;
2005 err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 2011 err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2006 read_unlock(&mrt_lock); 2012 read_unlock(&mrt_lock);
2013 rcu_read_unlock();
2007 return err; 2014 return err;
2008} 2015}
2009 2016
@@ -2055,14 +2062,14 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2055 s_h = cb->args[1]; 2062 s_h = cb->args[1];
2056 s_e = cb->args[2]; 2063 s_e = cb->args[2];
2057 2064
2058 read_lock(&mrt_lock); 2065 rcu_read_lock();
2059 ipmr_for_each_table(mrt, net) { 2066 ipmr_for_each_table(mrt, net) {
2060 if (t < s_t) 2067 if (t < s_t)
2061 goto next_table; 2068 goto next_table;
2062 if (t > s_t) 2069 if (t > s_t)
2063 s_h = 0; 2070 s_h = 0;
2064 for (h = s_h; h < MFC_LINES; h++) { 2071 for (h = s_h; h < MFC_LINES; h++) {
2065 list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) { 2072 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
2066 if (e < s_e) 2073 if (e < s_e)
2067 goto next_entry; 2074 goto next_entry;
2068 if (ipmr_fill_mroute(mrt, skb, 2075 if (ipmr_fill_mroute(mrt, skb,
@@ -2080,7 +2087,7 @@ next_table:
2080 t++; 2087 t++;
2081 } 2088 }
2082done: 2089done:
2083 read_unlock(&mrt_lock); 2090 rcu_read_unlock();
2084 2091
2085 cb->args[2] = e; 2092 cb->args[2] = e;
2086 cb->args[1] = h; 2093 cb->args[1] = h;
@@ -2213,14 +2220,14 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2213 struct mr_table *mrt = it->mrt; 2220 struct mr_table *mrt = it->mrt;
2214 struct mfc_cache *mfc; 2221 struct mfc_cache *mfc;
2215 2222
2216 read_lock(&mrt_lock); 2223 rcu_read_lock();
2217 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { 2224 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2218 it->cache = &mrt->mfc_cache_array[it->ct]; 2225 it->cache = &mrt->mfc_cache_array[it->ct];
2219 list_for_each_entry(mfc, it->cache, list) 2226 list_for_each_entry_rcu(mfc, it->cache, list)
2220 if (pos-- == 0) 2227 if (pos-- == 0)
2221 return mfc; 2228 return mfc;
2222 } 2229 }
2223 read_unlock(&mrt_lock); 2230 rcu_read_unlock();
2224 2231
2225 spin_lock_bh(&mfc_unres_lock); 2232 spin_lock_bh(&mfc_unres_lock);
2226 it->cache = &mrt->mfc_unres_queue; 2233 it->cache = &mrt->mfc_unres_queue;
@@ -2279,7 +2286,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2279 } 2286 }
2280 2287
2281 /* exhausted cache_array, show unresolved */ 2288 /* exhausted cache_array, show unresolved */
2282 read_unlock(&mrt_lock); 2289 rcu_read_unlock();
2283 it->cache = &mrt->mfc_unres_queue; 2290 it->cache = &mrt->mfc_unres_queue;
2284 it->ct = 0; 2291 it->ct = 0;
2285 2292
@@ -2302,7 +2309,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2302 if (it->cache == &mrt->mfc_unres_queue) 2309 if (it->cache == &mrt->mfc_unres_queue)
2303 spin_unlock_bh(&mfc_unres_lock); 2310 spin_unlock_bh(&mfc_unres_lock);
2304 else if (it->cache == &mrt->mfc_cache_array[it->ct]) 2311 else if (it->cache == &mrt->mfc_cache_array[it->ct])
2305 read_unlock(&mrt_lock); 2312 rcu_read_unlock();
2306} 2313}
2307 2314
2308static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 2315static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
@@ -2426,7 +2433,7 @@ int __init ip_mr_init(void)
2426 2433
2427 mrt_cachep = kmem_cache_create("ip_mrt_cache", 2434 mrt_cachep = kmem_cache_create("ip_mrt_cache",
2428 sizeof(struct mfc_cache), 2435 sizeof(struct mfc_cache),
2429 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2436 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2430 NULL); 2437 NULL);
2431 if (!mrt_cachep) 2438 if (!mrt_cachep)
2432 return -ENOMEM; 2439 return -ENOMEM;