aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2010-04-13 01:03:21 -0400
committerDavid S. Miller <davem@davemloft.net>2010-04-13 17:49:33 -0400
commit862465f2e7e90975e7bf0ecfbb171dd3adedd950 (patch)
tree281c43f90130cc23eb581c702afaf4ab226dbff5
parentd658f8a0e63b6476148162aa7a3ffffc58dcad52 (diff)
ipv4: ipmr: convert struct mfc_cache to struct list_head
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/mroute.h2
-rw-r--r--include/net/netns/ipv4.h4
-rw-r--r--net/ipv4/ipmr.c125
3 files changed, 64 insertions, 67 deletions
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index de7780a6dd32..7ff6c77d6008 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -191,7 +191,7 @@ struct vif_device {
191#define VIFF_STATIC 0x8000 191#define VIFF_STATIC 0x8000
192 192
193struct mfc_cache { 193struct mfc_cache {
194 struct mfc_cache *next; /* Next entry on cache line */ 194 struct list_head list;
195 __be32 mfc_mcastgrp; /* Group the entry belongs to */ 195 __be32 mfc_mcastgrp; /* Group the entry belongs to */
196 __be32 mfc_origin; /* Source of packet */ 196 __be32 mfc_origin; /* Source of packet */
197 vifi_t mfc_parent; /* Source interface */ 197 vifi_t mfc_parent; /* Source interface */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index b15e518f952a..5d06429968d5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -61,8 +61,8 @@ struct netns_ipv4 {
61#ifdef CONFIG_IP_MROUTE 61#ifdef CONFIG_IP_MROUTE
62 struct sock *mroute_sk; 62 struct sock *mroute_sk;
63 struct timer_list ipmr_expire_timer; 63 struct timer_list ipmr_expire_timer;
64 struct mfc_cache *mfc_unres_queue; 64 struct list_head mfc_unres_queue;
65 struct mfc_cache **mfc_cache_array; 65 struct list_head *mfc_cache_array;
66 struct vif_device *vif_table; 66 struct vif_device *vif_table;
67 int maxvif; 67 int maxvif;
68 atomic_t cache_resolve_queue_len; 68 atomic_t cache_resolve_queue_len;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index f8e25c8ba070..21b5edc2f343 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -367,35 +367,32 @@ static void ipmr_expire_process(unsigned long arg)
367 struct net *net = (struct net *)arg; 367 struct net *net = (struct net *)arg;
368 unsigned long now; 368 unsigned long now;
369 unsigned long expires; 369 unsigned long expires;
370 struct mfc_cache *c, **cp; 370 struct mfc_cache *c, *next;
371 371
372 if (!spin_trylock(&mfc_unres_lock)) { 372 if (!spin_trylock(&mfc_unres_lock)) {
373 mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10); 373 mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10);
374 return; 374 return;
375 } 375 }
376 376
377 if (net->ipv4.mfc_unres_queue == NULL) 377 if (list_empty(&net->ipv4.mfc_unres_queue))
378 goto out; 378 goto out;
379 379
380 now = jiffies; 380 now = jiffies;
381 expires = 10*HZ; 381 expires = 10*HZ;
382 cp = &net->ipv4.mfc_unres_queue;
383 382
384 while ((c=*cp) != NULL) { 383 list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) {
385 if (time_after(c->mfc_un.unres.expires, now)) { 384 if (time_after(c->mfc_un.unres.expires, now)) {
386 unsigned long interval = c->mfc_un.unres.expires - now; 385 unsigned long interval = c->mfc_un.unres.expires - now;
387 if (interval < expires) 386 if (interval < expires)
388 expires = interval; 387 expires = interval;
389 cp = &c->next;
390 continue; 388 continue;
391 } 389 }
392 390
393 *cp = c->next; 391 list_del(&c->list);
394
395 ipmr_destroy_unres(net, c); 392 ipmr_destroy_unres(net, c);
396 } 393 }
397 394
398 if (net->ipv4.mfc_unres_queue != NULL) 395 if (!list_empty(&net->ipv4.mfc_unres_queue))
399 mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires); 396 mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires);
400 397
401out: 398out:
@@ -537,11 +534,11 @@ static struct mfc_cache *ipmr_cache_find(struct net *net,
537 int line = MFC_HASH(mcastgrp, origin); 534 int line = MFC_HASH(mcastgrp, origin);
538 struct mfc_cache *c; 535 struct mfc_cache *c;
539 536
540 for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { 537 list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) {
541 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) 538 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
542 break; 539 return c;
543 } 540 }
544 return c; 541 return NULL;
545} 542}
546 543
547/* 544/*
@@ -699,18 +696,21 @@ static int ipmr_cache_report(struct net *net,
699static int 696static int
700ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) 697ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
701{ 698{
699 bool found = false;
702 int err; 700 int err;
703 struct mfc_cache *c; 701 struct mfc_cache *c;
704 const struct iphdr *iph = ip_hdr(skb); 702 const struct iphdr *iph = ip_hdr(skb);
705 703
706 spin_lock_bh(&mfc_unres_lock); 704 spin_lock_bh(&mfc_unres_lock);
707 for (c=net->ipv4.mfc_unres_queue; c; c=c->next) { 705 list_for_each_entry(c, &net->ipv4.mfc_unres_queue, list) {
708 if (c->mfc_mcastgrp == iph->daddr && 706 if (c->mfc_mcastgrp == iph->daddr &&
709 c->mfc_origin == iph->saddr) 707 c->mfc_origin == iph->saddr) {
708 found = true;
710 break; 709 break;
710 }
711 } 711 }
712 712
713 if (c == NULL) { 713 if (!found) {
714 /* 714 /*
715 * Create a new entry if allowable 715 * Create a new entry if allowable
716 */ 716 */
@@ -746,8 +746,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
746 } 746 }
747 747
748 atomic_inc(&net->ipv4.cache_resolve_queue_len); 748 atomic_inc(&net->ipv4.cache_resolve_queue_len);
749 c->next = net->ipv4.mfc_unres_queue; 749 list_add(&c->list, &net->ipv4.mfc_unres_queue);
750 net->ipv4.mfc_unres_queue = c;
751 750
752 mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires); 751 mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires);
753 } 752 }
@@ -774,16 +773,15 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
774static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) 773static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
775{ 774{
776 int line; 775 int line;
777 struct mfc_cache *c, **cp; 776 struct mfc_cache *c, *next;
778 777
779 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 778 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
780 779
781 for (cp = &net->ipv4.mfc_cache_array[line]; 780 list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[line], list) {
782 (c = *cp) != NULL; cp = &c->next) {
783 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 781 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
784 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 782 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
785 write_lock_bh(&mrt_lock); 783 write_lock_bh(&mrt_lock);
786 *cp = c->next; 784 list_del(&c->list);
787 write_unlock_bh(&mrt_lock); 785 write_unlock_bh(&mrt_lock);
788 786
789 ipmr_cache_free(c); 787 ipmr_cache_free(c);
@@ -795,22 +793,24 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
795 793
796static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) 794static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
797{ 795{
796 bool found = false;
798 int line; 797 int line;
799 struct mfc_cache *uc, *c, **cp; 798 struct mfc_cache *uc, *c;
800 799
801 if (mfc->mfcc_parent >= MAXVIFS) 800 if (mfc->mfcc_parent >= MAXVIFS)
802 return -ENFILE; 801 return -ENFILE;
803 802
804 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 803 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
805 804
806 for (cp = &net->ipv4.mfc_cache_array[line]; 805 list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) {
807 (c = *cp) != NULL; cp = &c->next) {
808 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 806 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
809 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) 807 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
808 found = true;
810 break; 809 break;
810 }
811 } 811 }
812 812
813 if (c != NULL) { 813 if (found) {
814 write_lock_bh(&mrt_lock); 814 write_lock_bh(&mrt_lock);
815 c->mfc_parent = mfc->mfcc_parent; 815 c->mfc_parent = mfc->mfcc_parent;
816 ipmr_update_thresholds(net, c, mfc->mfcc_ttls); 816 ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
@@ -835,8 +835,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
835 c->mfc_flags |= MFC_STATIC; 835 c->mfc_flags |= MFC_STATIC;
836 836
837 write_lock_bh(&mrt_lock); 837 write_lock_bh(&mrt_lock);
838 c->next = net->ipv4.mfc_cache_array[line]; 838 list_add(&c->list, &net->ipv4.mfc_cache_array[line]);
839 net->ipv4.mfc_cache_array[line] = c;
840 write_unlock_bh(&mrt_lock); 839 write_unlock_bh(&mrt_lock);
841 840
842 /* 841 /*
@@ -844,16 +843,15 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
844 * need to send on the frames and tidy up. 843 * need to send on the frames and tidy up.
845 */ 844 */
846 spin_lock_bh(&mfc_unres_lock); 845 spin_lock_bh(&mfc_unres_lock);
847 for (cp = &net->ipv4.mfc_unres_queue; (uc=*cp) != NULL; 846 list_for_each_entry(uc, &net->ipv4.mfc_unres_queue, list) {
848 cp = &uc->next) {
849 if (uc->mfc_origin == c->mfc_origin && 847 if (uc->mfc_origin == c->mfc_origin &&
850 uc->mfc_mcastgrp == c->mfc_mcastgrp) { 848 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
851 *cp = uc->next; 849 list_del(&uc->list);
852 atomic_dec(&net->ipv4.cache_resolve_queue_len); 850 atomic_dec(&net->ipv4.cache_resolve_queue_len);
853 break; 851 break;
854 } 852 }
855 } 853 }
856 if (net->ipv4.mfc_unres_queue == NULL) 854 if (list_empty(&net->ipv4.mfc_unres_queue))
857 del_timer(&net->ipv4.ipmr_expire_timer); 855 del_timer(&net->ipv4.ipmr_expire_timer);
858 spin_unlock_bh(&mfc_unres_lock); 856 spin_unlock_bh(&mfc_unres_lock);
859 857
@@ -872,6 +870,7 @@ static void mroute_clean_tables(struct net *net)
872{ 870{
873 int i; 871 int i;
874 LIST_HEAD(list); 872 LIST_HEAD(list);
873 struct mfc_cache *c, *next;
875 874
876 /* 875 /*
877 * Shut down all active vif entries 876 * Shut down all active vif entries
@@ -885,17 +884,12 @@ static void mroute_clean_tables(struct net *net)
885 /* 884 /*
886 * Wipe the cache 885 * Wipe the cache
887 */ 886 */
888 for (i=0; i<MFC_LINES; i++) { 887 for (i = 0; i < MFC_LINES; i++) {
889 struct mfc_cache *c, **cp; 888 list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[i], list) {
890 889 if (c->mfc_flags&MFC_STATIC)
891 cp = &net->ipv4.mfc_cache_array[i];
892 while ((c = *cp) != NULL) {
893 if (c->mfc_flags&MFC_STATIC) {
894 cp = &c->next;
895 continue; 890 continue;
896 }
897 write_lock_bh(&mrt_lock); 891 write_lock_bh(&mrt_lock);
898 *cp = c->next; 892 list_del(&c->list);
899 write_unlock_bh(&mrt_lock); 893 write_unlock_bh(&mrt_lock);
900 894
901 ipmr_cache_free(c); 895 ipmr_cache_free(c);
@@ -903,12 +897,9 @@ static void mroute_clean_tables(struct net *net)
903 } 897 }
904 898
905 if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { 899 if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
906 struct mfc_cache *c, **cp;
907
908 spin_lock_bh(&mfc_unres_lock); 900 spin_lock_bh(&mfc_unres_lock);
909 cp = &net->ipv4.mfc_unres_queue; 901 list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) {
910 while ((c = *cp) != NULL) { 902 list_del(&c->list);
911 *cp = c->next;
912 ipmr_destroy_unres(net, c); 903 ipmr_destroy_unres(net, c);
913 } 904 }
914 spin_unlock_bh(&mfc_unres_lock); 905 spin_unlock_bh(&mfc_unres_lock);
@@ -1789,7 +1780,7 @@ static const struct file_operations ipmr_vif_fops = {
1789 1780
1790struct ipmr_mfc_iter { 1781struct ipmr_mfc_iter {
1791 struct seq_net_private p; 1782 struct seq_net_private p;
1792 struct mfc_cache **cache; 1783 struct list_head *cache;
1793 int ct; 1784 int ct;
1794}; 1785};
1795 1786
@@ -1799,18 +1790,18 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
1799{ 1790{
1800 struct mfc_cache *mfc; 1791 struct mfc_cache *mfc;
1801 1792
1802 it->cache = net->ipv4.mfc_cache_array;
1803 read_lock(&mrt_lock); 1793 read_lock(&mrt_lock);
1804 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) 1794 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
1805 for (mfc = net->ipv4.mfc_cache_array[it->ct]; 1795 it->cache = &net->ipv4.mfc_cache_array[it->ct];
1806 mfc; mfc = mfc->next) 1796 list_for_each_entry(mfc, it->cache, list)
1807 if (pos-- == 0) 1797 if (pos-- == 0)
1808 return mfc; 1798 return mfc;
1799 }
1809 read_unlock(&mrt_lock); 1800 read_unlock(&mrt_lock);
1810 1801
1811 it->cache = &net->ipv4.mfc_unres_queue;
1812 spin_lock_bh(&mfc_unres_lock); 1802 spin_lock_bh(&mfc_unres_lock);
1813 for (mfc = net->ipv4.mfc_unres_queue; mfc; mfc = mfc->next) 1803 it->cache = &net->ipv4.mfc_unres_queue;
1804 list_for_each_entry(mfc, it->cache, list)
1814 if (pos-- == 0) 1805 if (pos-- == 0)
1815 return mfc; 1806 return mfc;
1816 spin_unlock_bh(&mfc_unres_lock); 1807 spin_unlock_bh(&mfc_unres_lock);
@@ -1842,18 +1833,19 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1842 if (v == SEQ_START_TOKEN) 1833 if (v == SEQ_START_TOKEN)
1843 return ipmr_mfc_seq_idx(net, seq->private, 0); 1834 return ipmr_mfc_seq_idx(net, seq->private, 0);
1844 1835
1845 if (mfc->next) 1836 if (mfc->list.next != it->cache)
1846 return mfc->next; 1837 return list_entry(mfc->list.next, struct mfc_cache, list);
1847 1838
1848 if (it->cache == &net->ipv4.mfc_unres_queue) 1839 if (it->cache == &net->ipv4.mfc_unres_queue)
1849 goto end_of_list; 1840 goto end_of_list;
1850 1841
1851 BUG_ON(it->cache != net->ipv4.mfc_cache_array); 1842 BUG_ON(it->cache != &net->ipv4.mfc_cache_array[it->ct]);
1852 1843
1853 while (++it->ct < MFC_LINES) { 1844 while (++it->ct < MFC_LINES) {
1854 mfc = net->ipv4.mfc_cache_array[it->ct]; 1845 it->cache = &net->ipv4.mfc_cache_array[it->ct];
1855 if (mfc) 1846 if (list_empty(it->cache))
1856 return mfc; 1847 continue;
1848 return list_first_entry(it->cache, struct mfc_cache, list);
1857 } 1849 }
1858 1850
1859 /* exhausted cache_array, show unresolved */ 1851 /* exhausted cache_array, show unresolved */
@@ -1862,9 +1854,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1862 it->ct = 0; 1854 it->ct = 0;
1863 1855
1864 spin_lock_bh(&mfc_unres_lock); 1856 spin_lock_bh(&mfc_unres_lock);
1865 mfc = net->ipv4.mfc_unres_queue; 1857 if (!list_empty(it->cache))
1866 if (mfc) 1858 return list_first_entry(it->cache, struct mfc_cache, list);
1867 return mfc;
1868 1859
1869 end_of_list: 1860 end_of_list:
1870 spin_unlock_bh(&mfc_unres_lock); 1861 spin_unlock_bh(&mfc_unres_lock);
@@ -1880,7 +1871,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1880 1871
1881 if (it->cache == &net->ipv4.mfc_unres_queue) 1872 if (it->cache == &net->ipv4.mfc_unres_queue)
1882 spin_unlock_bh(&mfc_unres_lock); 1873 spin_unlock_bh(&mfc_unres_lock);
1883 else if (it->cache == net->ipv4.mfc_cache_array) 1874 else if (it->cache == &net->ipv4.mfc_cache_array[it->ct])
1884 read_unlock(&mrt_lock); 1875 read_unlock(&mrt_lock);
1885} 1876}
1886 1877
@@ -1960,6 +1951,7 @@ static const struct net_protocol pim_protocol = {
1960 */ 1951 */
1961static int __net_init ipmr_net_init(struct net *net) 1952static int __net_init ipmr_net_init(struct net *net)
1962{ 1953{
1954 unsigned int i;
1963 int err = 0; 1955 int err = 0;
1964 1956
1965 net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), 1957 net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
@@ -1971,13 +1963,18 @@ static int __net_init ipmr_net_init(struct net *net)
1971 1963
1972 /* Forwarding cache */ 1964 /* Forwarding cache */
1973 net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, 1965 net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
1974 sizeof(struct mfc_cache *), 1966 sizeof(struct list_head),
1975 GFP_KERNEL); 1967 GFP_KERNEL);
1976 if (!net->ipv4.mfc_cache_array) { 1968 if (!net->ipv4.mfc_cache_array) {
1977 err = -ENOMEM; 1969 err = -ENOMEM;
1978 goto fail_mfc_cache; 1970 goto fail_mfc_cache;
1979 } 1971 }
1980 1972
1973 for (i = 0; i < MFC_LINES; i++)
1974 INIT_LIST_HEAD(&net->ipv4.mfc_cache_array[i]);
1975
1976 INIT_LIST_HEAD(&net->ipv4.mfc_unres_queue);
1977
1981 setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process, 1978 setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process,
1982 (unsigned long)net); 1979 (unsigned long)net);
1983 1980