author    Eric Dumazet <eric.dumazet@gmail.com>    2010-06-06 23:12:08 -0400
committer David S. Miller <davem@davemloft.net>    2010-06-08 00:25:21 -0400
commit    66018506e15bea62de4eefc3298f170b4bfcf5ef
tree      d2dbf5c06e317b85f75b946a1b63ed0917d0382a
parent    8b37ef0a1f6c2401fea3536facfa21191936bd6c
ip: Router Alert RCU conversion
Straightforward conversion to RCU: readers of the ip_ra_chain list no longer take a lock, and the rwlock that protected the list becomes a spinlock, taken only on the update side and now static to net/ipv4/ip_sockglue.c.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/ip.h        |  2
-rw-r--r--  net/ipv4/ip_input.c     | 11
-rw-r--r--  net/ipv4/ip_sockglue.c  | 23
3 files changed, 18 insertions(+), 18 deletions(-)
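For context (not part of the patch): the conversion follows the standard rwlock-to-RCU recipe. Below is a minimal, self-contained sketch of that recipe using hypothetical names (struct item, chain, chain_lock, chain_contains, chain_del); readers traverse the list with rcu_dereference() under rcu_read_lock(), while the writer publishes changes with rcu_assign_pointer() under a plain spinlock and defers freeing with call_rcu(). In the patch itself, ip_call_ra_chain() relies on the RCU read-side protection already held on the packet receive path rather than taking rcu_read_lock() explicitly.

/* Minimal sketch, not code from this patch; all names are hypothetical. */
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct item {
        struct item     *next;
        int             value;
        struct rcu_head rcu;
};

static struct item *chain;              /* RCU-protected singly linked list */
static DEFINE_SPINLOCK(chain_lock);     /* serializes writers only */

static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

/* Reader side: lockless traversal, as in ip_call_ra_chain(). */
static bool chain_contains(int value)
{
        struct item *it;
        bool found = false;

        rcu_read_lock();
        for (it = rcu_dereference(chain); it; it = rcu_dereference(it->next)) {
                if (it->value == value) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

/*
 * Writer side: unlink under the spinlock, then free only after a grace
 * period, as in the removal path of ip_ra_control().
 */
static void chain_del(int value)
{
        struct item *it, **prev;

        spin_lock_bh(&chain_lock);
        for (prev = &chain; (it = *prev) != NULL; prev = &it->next) {
                if (it->value == value) {
                        rcu_assign_pointer(*prev, it->next);
                        spin_unlock_bh(&chain_lock);
                        call_rcu(&it->rcu, item_free_rcu);
                        return;
                }
        }
        spin_unlock_bh(&chain_lock);
}

The payoff of this pattern is visible in the diff below: Router Alert packet delivery no longer touches ip_ra_lock at all, and the lock only serializes the rare setsockopt-driven updates.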
diff --git a/include/net/ip.h b/include/net/ip.h
index 452f229c380a..9982c97f0bdc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -62,10 +62,10 @@ struct ip_ra_chain {
 	struct ip_ra_chain	*next;
 	struct sock		*sk;
 	void			(*destructor)(struct sock *);
+	struct rcu_head		rcu;
 };
 
 extern struct ip_ra_chain *ip_ra_chain;
-extern rwlock_t ip_ra_lock;
 
 /* IP flags. */
 #define IP_CE		0x8000		/* Flag: "Congestion"		*/
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d52c9da644cf..d274078b1665 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,7 +146,7 @@
 #include <linux/netlink.h>
 
 /*
- *	Process Router Attention IP option
+ *	Process Router Attention IP option (RFC 2113)
  */
 int ip_call_ra_chain(struct sk_buff *skb)
 {
@@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
 	struct sock *last = NULL;
 	struct net_device *dev = skb->dev;
 
-	read_lock(&ip_ra_lock);
-	for (ra = ip_ra_chain; ra; ra = ra->next) {
+	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
 		struct sock *sk = ra->sk;
 
 		/* If socket is bound to an interface, only report
@@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
 		    sk->sk_bound_dev_if == dev->ifindex) &&
 		    net_eq(sock_net(sk), dev_net(dev))) {
 			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
-					read_unlock(&ip_ra_lock);
+				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
 					return 1;
-				}
 			}
 			if (last) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
 
 	if (last) {
 		raw_rcv(last, skb);
-		read_unlock(&ip_ra_lock);
 		return 1;
 	}
-	read_unlock(&ip_ra_lock);
 	return 0;
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ce231780a2b1..08b9519a24f4 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -239,7 +239,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
    sent to multicast group to reach destination designated router.
  */
 struct ip_ra_chain *ip_ra_chain;
-DEFINE_RWLOCK(ip_ra_lock);
+static DEFINE_SPINLOCK(ip_ra_lock);
+
+static void ip_ra_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_ra_chain, rcu));
+}
 
 int ip_ra_control(struct sock *sk, unsigned char on,
 		  void (*destructor)(struct sock *))
@@ -251,35 +256,35 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 
 	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
-	write_lock_bh(&ip_ra_lock);
+	spin_lock_bh(&ip_ra_lock);
 	for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
 		if (ra->sk == sk) {
 			if (on) {
-				write_unlock_bh(&ip_ra_lock);
+				spin_unlock_bh(&ip_ra_lock);
 				kfree(new_ra);
 				return -EADDRINUSE;
 			}
-			*rap = ra->next;
-			write_unlock_bh(&ip_ra_lock);
+			rcu_assign_pointer(*rap, ra->next);
+			spin_unlock_bh(&ip_ra_lock);
 
 			if (ra->destructor)
 				ra->destructor(sk);
 			sock_put(sk);
-			kfree(ra);
+			call_rcu(&ra->rcu, ip_ra_free_rcu);
 			return 0;
 		}
 	}
 	if (new_ra == NULL) {
-		write_unlock_bh(&ip_ra_lock);
+		spin_unlock_bh(&ip_ra_lock);
 		return -ENOBUFS;
 	}
 	new_ra->sk = sk;
 	new_ra->destructor = destructor;
 
 	new_ra->next = ra;
-	*rap = new_ra;
+	rcu_assign_pointer(*rap, new_ra);
 	sock_hold(sk);
-	write_unlock_bh(&ip_ra_lock);
+	spin_unlock_bh(&ip_ra_lock);
 
 	return 0;
 }
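For reference (not in this patch): ip_ra_control() is reached from the IP_ROUTER_ALERT setsockopt, and the sockets on ip_ra_chain are raw sockets such as those opened by RSVP daemons. A hypothetical userspace sketch of that registration, assuming root/CAP_NET_RAW and an RSVP-style raw socket, might look like this:

/* Hypothetical userspace sketch (not part of the patch): register a raw
 * socket on the router-alert chain via setsockopt(IP_ROUTER_ALERT), the
 * path that ends up in ip_ra_control(). */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int on = 1;
	/* ip_ra_control() only accepts SOCK_RAW sockets whose protocol is
	 * not IPPROTO_RAW; IPPROTO_RSVP is the classic example. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		perror("setsockopt(IP_ROUTER_ALERT)");
		return 1;
	}
	/* From here, packets carrying the IP Router Alert option are
	 * delivered to this socket through ip_call_ra_chain(). */
	return 0;
}

Setting the option back to 0 unregisters the socket through the same ip_ra_control() removal path, which after this patch defers the kfree() with call_rcu().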