about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author    YOSHIFUJI Hideaki / 吉藤英明 <yoshfuji@linux-ipv6.org>    2013-01-30 04:27:47 -0500
committer David S. Miller <davem@davemloft.net>    2013-01-30 22:41:13 -0500
commitd3aedd5ebd4b0b925b0bcda548066803e1318499 (patch)
treed145189ad0a3192326d8df5b73188ec77ca942b7 /net
parentf256dc59d0729cf7d371b93062375d9bc79c1e44 (diff)
ipv6 flowlabel: Convert hash list to RCU.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/ipv6/ip6_flowlabel.c  |  94
1 file changed, 54 insertions(+), 40 deletions(-)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 5d767f1b8780..da156015d827 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -51,25 +51,33 @@
51#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) 51#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
52 52
53static atomic_t fl_size = ATOMIC_INIT(0); 53static atomic_t fl_size = ATOMIC_INIT(0);
54static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1]; 54static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
55 55
56static void ip6_fl_gc(unsigned long dummy); 56static void ip6_fl_gc(unsigned long dummy);
57static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0); 57static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
58 58
59/* FL hash table lock: it protects only of GC */ 59/* FL hash table lock: it protects only of GC */
60 60
61static DEFINE_RWLOCK(ip6_fl_lock); 61static DEFINE_SPINLOCK(ip6_fl_lock);
62 62
63/* Big socket sock */ 63/* Big socket sock */
64 64
65static DEFINE_RWLOCK(ip6_sk_fl_lock); 65static DEFINE_RWLOCK(ip6_sk_fl_lock);
66 66
67#define for_each_fl_rcu(hash, fl) \
68 for (fl = rcu_dereference(fl_ht[(hash)]); \
69 fl != NULL; \
70 fl = rcu_dereference(fl->next))
71#define for_each_fl_continue_rcu(fl) \
72 for (fl = rcu_dereference(fl->next); \
73 fl != NULL; \
74 fl = rcu_dereference(fl->next))
67 75
68static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label) 76static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
69{ 77{
70 struct ip6_flowlabel *fl; 78 struct ip6_flowlabel *fl;
71 79
72 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) { 80 for_each_fl_rcu(FL_HASH(label), fl) {
73 if (fl->label == label && net_eq(fl->fl_net, net)) 81 if (fl->label == label && net_eq(fl->fl_net, net))
74 return fl; 82 return fl;
75 } 83 }
@@ -80,11 +88,11 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
80{ 88{
81 struct ip6_flowlabel *fl; 89 struct ip6_flowlabel *fl;
82 90
83 read_lock_bh(&ip6_fl_lock); 91 rcu_read_lock_bh();
84 fl = __fl_lookup(net, label); 92 fl = __fl_lookup(net, label);
85 if (fl) 93 if (fl && !atomic_inc_not_zero(&fl->users))
86 atomic_inc(&fl->users); 94 fl = NULL;
87 read_unlock_bh(&ip6_fl_lock); 95 rcu_read_unlock_bh();
88 return fl; 96 return fl;
89} 97}
90 98
@@ -96,13 +104,13 @@ static void fl_free(struct ip6_flowlabel *fl)
96 put_pid(fl->owner.pid); 104 put_pid(fl->owner.pid);
97 release_net(fl->fl_net); 105 release_net(fl->fl_net);
98 kfree(fl->opt); 106 kfree(fl->opt);
107 kfree_rcu(fl, rcu);
99 } 108 }
100 kfree(fl);
101} 109}
102 110
103static void fl_release(struct ip6_flowlabel *fl) 111static void fl_release(struct ip6_flowlabel *fl)
104{ 112{
105 write_lock_bh(&ip6_fl_lock); 113 spin_lock_bh(&ip6_fl_lock);
106 114
107 fl->lastuse = jiffies; 115 fl->lastuse = jiffies;
108 if (atomic_dec_and_test(&fl->users)) { 116 if (atomic_dec_and_test(&fl->users)) {
@@ -119,7 +127,7 @@ static void fl_release(struct ip6_flowlabel *fl)
119 time_after(ip6_fl_gc_timer.expires, ttd)) 127 time_after(ip6_fl_gc_timer.expires, ttd))
120 mod_timer(&ip6_fl_gc_timer, ttd); 128 mod_timer(&ip6_fl_gc_timer, ttd);
121 } 129 }
122 write_unlock_bh(&ip6_fl_lock); 130 spin_unlock_bh(&ip6_fl_lock);
123} 131}
124 132
125static void ip6_fl_gc(unsigned long dummy) 133static void ip6_fl_gc(unsigned long dummy)
@@ -128,12 +136,13 @@ static void ip6_fl_gc(unsigned long dummy)
128 unsigned long now = jiffies; 136 unsigned long now = jiffies;
129 unsigned long sched = 0; 137 unsigned long sched = 0;
130 138
131 write_lock(&ip6_fl_lock); 139 spin_lock(&ip6_fl_lock);
132 140
133 for (i=0; i<=FL_HASH_MASK; i++) { 141 for (i=0; i<=FL_HASH_MASK; i++) {
134 struct ip6_flowlabel *fl, **flp; 142 struct ip6_flowlabel *fl, **flp;
135 flp = &fl_ht[i]; 143 flp = &fl_ht[i];
136 while ((fl=*flp) != NULL) { 144 while ((fl = rcu_dereference_protected(*flp,
145 lockdep_is_held(&ip6_fl_lock))) != NULL) {
137 if (atomic_read(&fl->users) == 0) { 146 if (atomic_read(&fl->users) == 0) {
138 unsigned long ttd = fl->lastuse + fl->linger; 147 unsigned long ttd = fl->lastuse + fl->linger;
139 if (time_after(ttd, fl->expires)) 148 if (time_after(ttd, fl->expires))
@@ -156,18 +165,19 @@ static void ip6_fl_gc(unsigned long dummy)
156 if (sched) { 165 if (sched) {
157 mod_timer(&ip6_fl_gc_timer, sched); 166 mod_timer(&ip6_fl_gc_timer, sched);
158 } 167 }
159 write_unlock(&ip6_fl_lock); 168 spin_unlock(&ip6_fl_lock);
160} 169}
161 170
162static void __net_exit ip6_fl_purge(struct net *net) 171static void __net_exit ip6_fl_purge(struct net *net)
163{ 172{
164 int i; 173 int i;
165 174
166 write_lock(&ip6_fl_lock); 175 spin_lock(&ip6_fl_lock);
167 for (i = 0; i <= FL_HASH_MASK; i++) { 176 for (i = 0; i <= FL_HASH_MASK; i++) {
168 struct ip6_flowlabel *fl, **flp; 177 struct ip6_flowlabel *fl, **flp;
169 flp = &fl_ht[i]; 178 flp = &fl_ht[i];
170 while ((fl = *flp) != NULL) { 179 while ((fl = rcu_dereference_protected(*flp,
180 lockdep_is_held(&ip6_fl_lock))) != NULL) {
171 if (net_eq(fl->fl_net, net) && 181 if (net_eq(fl->fl_net, net) &&
172 atomic_read(&fl->users) == 0) { 182 atomic_read(&fl->users) == 0) {
173 *flp = fl->next; 183 *flp = fl->next;
@@ -178,7 +188,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
178 flp = &fl->next; 188 flp = &fl->next;
179 } 189 }
180 } 190 }
181 write_unlock(&ip6_fl_lock); 191 spin_unlock(&ip6_fl_lock);
182} 192}
183 193
184static struct ip6_flowlabel *fl_intern(struct net *net, 194static struct ip6_flowlabel *fl_intern(struct net *net,
@@ -188,7 +198,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
188 198
189 fl->label = label & IPV6_FLOWLABEL_MASK; 199 fl->label = label & IPV6_FLOWLABEL_MASK;
190 200
191 write_lock_bh(&ip6_fl_lock); 201 spin_lock_bh(&ip6_fl_lock);
192 if (label == 0) { 202 if (label == 0) {
193 for (;;) { 203 for (;;) {
194 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK; 204 fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
@@ -210,16 +220,16 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
210 lfl = __fl_lookup(net, fl->label); 220 lfl = __fl_lookup(net, fl->label);
211 if (lfl != NULL) { 221 if (lfl != NULL) {
212 atomic_inc(&lfl->users); 222 atomic_inc(&lfl->users);
213 write_unlock_bh(&ip6_fl_lock); 223 spin_unlock_bh(&ip6_fl_lock);
214 return lfl; 224 return lfl;
215 } 225 }
216 } 226 }
217 227
218 fl->lastuse = jiffies; 228 fl->lastuse = jiffies;
219 fl->next = fl_ht[FL_HASH(fl->label)]; 229 fl->next = fl_ht[FL_HASH(fl->label)];
220 fl_ht[FL_HASH(fl->label)] = fl; 230 rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
221 atomic_inc(&fl_size); 231 atomic_inc(&fl_size);
222 write_unlock_bh(&ip6_fl_lock); 232 spin_unlock_bh(&ip6_fl_lock);
223 return NULL; 233 return NULL;
224} 234}
225 235
@@ -650,13 +660,13 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
650 struct net *net = seq_file_net(seq); 660 struct net *net = seq_file_net(seq);
651 661
652 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { 662 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
653 fl = fl_ht[state->bucket]; 663 for_each_fl_rcu(state->bucket, fl) {
654 664 if (net_eq(fl->fl_net, net))
655 while (fl && !net_eq(fl->fl_net, net)) 665 goto out;
656 fl = fl->next; 666 }
657 if (fl)
658 break;
659 } 667 }
668 fl = NULL;
669out:
660 return fl; 670 return fl;
661} 671}
662 672
@@ -665,18 +675,22 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
665 struct ip6fl_iter_state *state = ip6fl_seq_private(seq); 675 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
666 struct net *net = seq_file_net(seq); 676 struct net *net = seq_file_net(seq);
667 677
668 fl = fl->next; 678 for_each_fl_continue_rcu(fl) {
679 if (net_eq(fl->fl_net, net))
680 goto out;
681 }
682
669try_again: 683try_again:
670 while (fl && !net_eq(fl->fl_net, net)) 684 if (++state->bucket <= FL_HASH_MASK) {
671 fl = fl->next; 685 for_each_fl_rcu(state->bucket, fl) {
672 686 if (net_eq(fl->fl_net, net))
673 while (!fl) { 687 goto out;
674 if (++state->bucket <= FL_HASH_MASK) { 688 }
675 fl = fl_ht[state->bucket]; 689 goto try_again;
676 goto try_again;
677 } else
678 break;
679 } 690 }
691 fl = NULL;
692
693out:
680 return fl; 694 return fl;
681} 695}
682 696
@@ -690,9 +704,9 @@ static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
690} 704}
691 705
692static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos) 706static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
693 __acquires(ip6_fl_lock) 707 __acquires(RCU)
694{ 708{
695 read_lock_bh(&ip6_fl_lock); 709 rcu_read_lock_bh();
696 return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 710 return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
697} 711}
698 712
@@ -709,9 +723,9 @@ static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
709} 723}
710 724
711static void ip6fl_seq_stop(struct seq_file *seq, void *v) 725static void ip6fl_seq_stop(struct seq_file *seq, void *v)
712 __releases(ip6_fl_lock) 726 __releases(RCU)
713{ 727{
714 read_unlock_bh(&ip6_fl_lock); 728 rcu_read_unlock_bh();
715} 729}
716 730
717static int ip6fl_seq_show(struct seq_file *seq, void *v) 731static int ip6fl_seq_show(struct seq_file *seq, void *v)