author	Patrick McHardy <kaber@trash.net>	2010-05-11 08:40:51 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-05-11 08:40:51 -0400
commit	f30a77842129b5656360cc1f5db48a3fcfb64528 (patch)
tree	3a01eac73fd0e86d3c7a976ab6bccca9878cd35c /net/ipv6/ip6mr.c
parent	b5aa30b19121de49021fba57aa1f6e4c787fcf67 (diff)
ipv6: ip6mr: convert struct mfc_cache to struct list_head
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv6/ip6mr.c')
-rw-r--r--	net/ipv6/ip6mr.c	127
1 file changed, 62 insertions(+), 65 deletions(-)
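
The conversion pattern, outside the diff context: the patch drops the open-coded singly-linked chains (a struct mfc6_cache *next pointer walked with a struct mfc6_cache **cp iterator) in favour of a struct list_head embedded in each cache entry, so the hash lines and the unresolved queue become list heads manipulated with list_add(), list_del(), list_empty() and list_for_each_entry(). The sketch below is not taken from the patch; it is a minimal user-space illustration of that pattern, with simplified stand-ins for the kernel's <linux/list.h> helpers, and struct cache_entry with its demo values is made up for illustration. It builds with gcc (list_for_each_entry uses the typeof extension, as the kernel macro does).

/*
 * Minimal user-space sketch of converting an open-coded "c->next" chain
 * to an embedded list_head.  Not kernel code: the list helpers below are
 * simplified stand-ins for <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

static void list_add(struct list_head *entry, struct list_head *head)
{
	/* insert at the front, like list_add() in the kernel */
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* map an embedded list_head back to its containing entry */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

struct cache_entry {
	int origin;			/* stand-in for mf6c_origin */
	struct list_head list;		/* was: struct cache_entry *next */
};

int main(void)
{
	struct list_head cache_line;	/* was: struct cache_entry *head */
	struct cache_entry a = { .origin = 1 }, b = { .origin = 2 };
	struct cache_entry *c;

	INIT_LIST_HEAD(&cache_line);	/* was: head = NULL */
	list_add(&a.list, &cache_line);	/* was: a.next = head; head = &a; */
	list_add(&b.list, &cache_line);

	/* was: for (c = head; c; c = c->next) */
	list_for_each_entry(c, &cache_line, list)
		printf("origin %d\n", c->origin);

	/* was: walk with a **cp iterator and unlink via *cp = c->next */
	list_del(&a.list);
	list_del(&b.list);
	printf("empty after deletions: %d\n", list_empty(&cache_line));
	return 0;
}
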
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b3783a436bbd..08e09042ad1c 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -89,7 +89,7 @@ static void mroute_clean_tables(struct net *net);
 
 struct ipmr_mfc_iter {
 	struct seq_net_private p;
-	struct mfc6_cache **cache;
+	struct list_head *cache;
 	int ct;
 };
 
@@ -99,18 +99,18 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 {
 	struct mfc6_cache *mfc;
 
-	it->cache = net->ipv6.mfc6_cache_array;
 	read_lock(&mrt_lock);
-	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++)
-		for (mfc = net->ipv6.mfc6_cache_array[it->ct];
-		     mfc; mfc = mfc->next)
+	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
+		it->cache = &net->ipv6.mfc6_cache_array[it->ct];
+		list_for_each_entry(mfc, it->cache, list)
 			if (pos-- == 0)
 				return mfc;
+	}
 	read_unlock(&mrt_lock);
 
-	it->cache = &net->ipv6.mfc6_unres_queue;
 	spin_lock_bh(&mfc_unres_lock);
-	for (mfc = net->ipv6.mfc6_unres_queue; mfc; mfc = mfc->next)
+	it->cache = &net->ipv6.mfc6_unres_queue;
+	list_for_each_entry(mfc, it->cache, list)
 		if (pos-- == 0)
 			return mfc;
 	spin_unlock_bh(&mfc_unres_lock);
@@ -119,9 +119,6 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 	return NULL;
 }
 
-
-
-
 /*
  * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
  */
@@ -238,18 +235,19 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (v == SEQ_START_TOKEN)
 		return ipmr_mfc_seq_idx(net, seq->private, 0);
 
-	if (mfc->next)
-		return mfc->next;
+	if (mfc->list.next != it->cache)
+		return list_entry(mfc->list.next, struct mfc6_cache, list);
 
 	if (it->cache == &net->ipv6.mfc6_unres_queue)
 		goto end_of_list;
 
-	BUG_ON(it->cache != net->ipv6.mfc6_cache_array);
+	BUG_ON(it->cache != &net->ipv6.mfc6_cache_array[it->ct]);
 
 	while (++it->ct < MFC6_LINES) {
-		mfc = net->ipv6.mfc6_cache_array[it->ct];
-		if (mfc)
-			return mfc;
+		it->cache = &net->ipv6.mfc6_cache_array[it->ct];
+		if (list_empty(it->cache))
+			continue;
+		return list_first_entry(it->cache, struct mfc6_cache, list);
 	}
 
 	/* exhausted cache_array, show unresolved */
@@ -258,9 +256,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	it->ct = 0;
 
 	spin_lock_bh(&mfc_unres_lock);
-	mfc = net->ipv6.mfc6_unres_queue;
-	if (mfc)
-		return mfc;
+	if (!list_empty(it->cache))
+		return list_first_entry(it->cache, struct mfc6_cache, list);
 
  end_of_list:
 	spin_unlock_bh(&mfc_unres_lock);
@@ -560,25 +557,22 @@ static void ipmr_do_expire_process(struct net *net)
 {
 	unsigned long now = jiffies;
 	unsigned long expires = 10 * HZ;
-	struct mfc6_cache *c, **cp;
-
-	cp = &net->ipv6.mfc6_unres_queue;
+	struct mfc6_cache *c, *next;
 
-	while ((c = *cp) != NULL) {
+	list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) {
 		if (time_after(c->mfc_un.unres.expires, now)) {
 			/* not yet... */
 			unsigned long interval = c->mfc_un.unres.expires - now;
 			if (interval < expires)
 				expires = interval;
-			cp = &c->next;
 			continue;
 		}
 
-		*cp = c->next;
+		list_del(&c->list);
 		ip6mr_destroy_unres(net, c);
 	}
 
-	if (net->ipv6.mfc6_unres_queue != NULL)
+	if (!list_empty(&net->ipv6.mfc6_unres_queue))
 		mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires);
 }
 
@@ -591,7 +585,7 @@ static void ipmr_expire_process(unsigned long arg)
 		return;
 	}
 
-	if (net->ipv6.mfc6_unres_queue != NULL)
+	if (!list_empty(&net->ipv6.mfc6_unres_queue))
 		ipmr_do_expire_process(net);
 
 	spin_unlock(&mfc_unres_lock);
@@ -706,12 +700,12 @@ static struct mfc6_cache *ip6mr_cache_find(struct net *net,
 	int line = MFC6_HASH(mcastgrp, origin);
 	struct mfc6_cache *c;
 
-	for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) {
+	list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
-			break;
+			return c;
 	}
-	return c;
+	return NULL;
 }
 
 /*
@@ -872,17 +866,20 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 static int
 ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 {
+	bool found = false;
 	int err;
 	struct mfc6_cache *c;
 
 	spin_lock_bh(&mfc_unres_lock);
-	for (c = net->ipv6.mfc6_unres_queue; c; c = c->next) {
+	list_for_each_entry(c, &net->ipv6.mfc6_unres_queue, list) {
 		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
-		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
+		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
+			found = true;
 			break;
+		}
 	}
 
-	if (c == NULL) {
+	if (!found) {
 		/*
 		 * Create a new entry if allowable
 		 */
@@ -918,8 +915,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 		}
 
 		atomic_inc(&net->ipv6.cache_resolve_queue_len);
-		c->next = net->ipv6.mfc6_unres_queue;
-		net->ipv6.mfc6_unres_queue = c;
+		list_add(&c->list, &net->ipv6.mfc6_unres_queue);
 
 		ipmr_do_expire_process(net);
 	}
@@ -946,16 +942,15 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
 {
 	int line;
-	struct mfc6_cache *c, **cp;
+	struct mfc6_cache *c, *next;
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
-	for (cp = &net->ipv6.mfc6_cache_array[line];
-	     (c = *cp) != NULL; cp = &c->next) {
+	list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
 			write_lock_bh(&mrt_lock);
-			*cp = c->next;
+			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
 
 			ip6mr_cache_free(c);
@@ -997,7 +992,9 @@ static struct notifier_block ip6_mr_notifier = {
 
 static int __net_init ip6mr_net_init(struct net *net)
 {
+	unsigned int i;
 	int err = 0;
+
 	net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
 				       GFP_KERNEL);
 	if (!net->ipv6.vif6_table) {
@@ -1007,13 +1004,18 @@ static int __net_init ip6mr_net_init(struct net *net)
 
 	/* Forwarding cache */
 	net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
-					     sizeof(struct mfc6_cache *),
+					     sizeof(struct list_head),
 					     GFP_KERNEL);
 	if (!net->ipv6.mfc6_cache_array) {
 		err = -ENOMEM;
 		goto fail_mfc6_cache;
 	}
 
+	for (i = 0; i < MFC6_LINES; i++)
+		INIT_LIST_HEAD(&net->ipv6.mfc6_cache_array[i]);
+
+	INIT_LIST_HEAD(&net->ipv6.mfc6_unres_queue);
+
 	setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process,
 		    (unsigned long)net);
 
@@ -1105,8 +1107,9 @@ void ip6_mr_cleanup(void)
 
 static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 {
+	bool found = false;
 	int line;
-	struct mfc6_cache *uc, *c, **cp;
+	struct mfc6_cache *uc, *c;
 	unsigned char ttls[MAXMIFS];
 	int i;
 
@@ -1122,14 +1125,15 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
-	for (cp = &net->ipv6.mfc6_cache_array[line];
-	     (c = *cp) != NULL; cp = &c->next) {
+	list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
-		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+			found = true;
 			break;
+		}
 	}
 
-	if (c != NULL) {
+	if (found) {
 		write_lock_bh(&mrt_lock);
 		c->mf6c_parent = mfc->mf6cc_parent;
 		ip6mr_update_thresholds(net, c, ttls);
@@ -1154,29 +1158,29 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
 	write_lock_bh(&mrt_lock);
-	c->next = net->ipv6.mfc6_cache_array[line];
-	net->ipv6.mfc6_cache_array[line] = c;
+	list_add(&c->list, &net->ipv6.mfc6_cache_array[line]);
 	write_unlock_bh(&mrt_lock);
 
 	/*
 	 *	Check to see if we resolved a queued list. If so we
 	 *	need to send on the frames and tidy up.
 	 */
+	found = false;
 	spin_lock_bh(&mfc_unres_lock);
-	for (cp = &net->ipv6.mfc6_unres_queue; (uc = *cp) != NULL;
-	     cp = &uc->next) {
+	list_for_each_entry(uc, &net->ipv6.mfc6_unres_queue, list) {
 		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
-			*cp = uc->next;
+			list_del(&uc->list);
 			atomic_dec(&net->ipv6.cache_resolve_queue_len);
+			found = true;
 			break;
 		}
 	}
-	if (net->ipv6.mfc6_unres_queue == NULL)
+	if (list_empty(&net->ipv6.mfc6_unres_queue))
 		del_timer(&net->ipv6.ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
-	if (uc) {
+	if (found) {
 		ip6mr_cache_resolve(net, uc, c);
 		ip6mr_cache_free(uc);
 	}
@@ -1191,6 +1195,7 @@ static void mroute_clean_tables(struct net *net)
 {
 	int i;
 	LIST_HEAD(list);
+	struct mfc6_cache *c, *next;
 
 	/*
 	 *	Shut down all active vif entries
@@ -1205,16 +1210,11 @@ static void mroute_clean_tables(struct net *net)
 	 *	Wipe the cache
 	 */
 	for (i = 0; i < MFC6_LINES; i++) {
-		struct mfc6_cache *c, **cp;
-
-		cp = &net->ipv6.mfc6_cache_array[i];
-		while ((c = *cp) != NULL) {
-			if (c->mfc_flags & MFC_STATIC) {
-				cp = &c->next;
+		list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[i], list) {
+			if (c->mfc_flags & MFC_STATIC)
 				continue;
-			}
 			write_lock_bh(&mrt_lock);
-			*cp = c->next;
+			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
 
 			ip6mr_cache_free(c);
@@ -1222,12 +1222,9 @@ static void mroute_clean_tables(struct net *net)
 	}
 
 	if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
-		struct mfc6_cache *c, **cp;
-
 		spin_lock_bh(&mfc_unres_lock);
-		cp = &net->ipv6.mfc6_unres_queue;
-		while ((c = *cp) != NULL) {
-			*cp = c->next;
+		list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) {
+			list_del(&c->list);
 			ip6mr_destroy_unres(net, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);