Diffstat (limited to 'net/decnet/dn_rules.c')
 net/decnet/dn_rules.c | 115
 1 file changed, 62 insertions(+), 53 deletions(-)
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 1060de70bc0c..446faafe2065 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -27,6 +27,8 @@
 #include <linux/timer.h>
 #include <linux/spinlock.h>
 #include <linux/in_route.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <net/neighbour.h>
@@ -39,18 +41,18 @@
 
 struct dn_fib_rule
 {
-	struct dn_fib_rule *r_next;
+	struct hlist_node r_hlist;
 	atomic_t r_clntref;
 	u32 r_preference;
 	unsigned char r_table;
 	unsigned char r_action;
 	unsigned char r_dst_len;
 	unsigned char r_src_len;
-	dn_address r_src;
-	dn_address r_srcmask;
-	dn_address r_dst;
-	dn_address r_dstmask;
-	dn_address r_srcmap;
+	__le16 r_src;
+	__le16 r_srcmask;
+	__le16 r_dst;
+	__le16 r_dstmask;
+	__le16 r_srcmap;
 	u8 r_flags;
 #ifdef CONFIG_DECNET_ROUTE_FWMARK
 	u32 r_fwmark;
@@ -58,6 +60,7 @@ struct dn_fib_rule
 	int r_ifindex;
 	char r_ifname[IFNAMSIZ];
 	int r_dead;
+	struct rcu_head rcu;
 };
 
 static struct dn_fib_rule default_rule = {
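
The struct change above replaces the hand-rolled r_next pointer with an embedded hlist_node and adds an rcu_head for deferred freeing. As a minimal sketch of that embedding (struct and helper names here are illustrative, not from this patch), container_of() is what maps an embedded member back to the enclosing entry:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

/* Illustrative stand-in for dn_fib_rule: the hlist_node links the entry
 * into the rule list, the rcu_head is handed to call_rcu() at teardown. */
struct example_rule {
	struct hlist_node node;
	struct rcu_head rcu;
	u32 preference;
};

/* Recover the entry from its embedded rcu_head, as an RCU callback must. */
static struct example_rule *rule_of(struct rcu_head *head)
{
	return container_of(head, struct example_rule, rcu);
}
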
@@ -67,18 +70,17 @@ static struct dn_fib_rule default_rule = {
 	.r_action = RTN_UNICAST
 };
 
-static struct dn_fib_rule *dn_fib_rules = &default_rule;
-static DEFINE_RWLOCK(dn_fib_rules_lock);
-
+static struct hlist_head dn_fib_rules;
 
 int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct rtattr **rta = arg;
 	struct rtmsg *rtm = NLMSG_DATA(nlh);
-	struct dn_fib_rule *r, **rp;
+	struct dn_fib_rule *r;
+	struct hlist_node *node;
 	int err = -ESRCH;
 
-	for(rp=&dn_fib_rules; (r=*rp) != NULL; rp = &r->r_next) {
+	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
 		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 2) == 0) &&
 		    rtm->rtm_src_len == r->r_src_len &&
 		    rtm->rtm_dst_len == r->r_dst_len &&
@@ -95,10 +97,8 @@ int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 			if (r == &default_rule)
 				break;
 
-			write_lock_bh(&dn_fib_rules_lock);
-			*rp = r->r_next;
+			hlist_del_rcu(&r->r_hlist);
 			r->r_dead = 1;
-			write_unlock_bh(&dn_fib_rules_lock);
 			dn_fib_rule_put(r);
 			err = 0;
 			break;
@@ -108,11 +108,17 @@ int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	return err;
 }
 
+static inline void dn_fib_rule_put_rcu(struct rcu_head *head)
+{
+	struct dn_fib_rule *r = container_of(head, struct dn_fib_rule, rcu);
+	kfree(r);
+}
+
 void dn_fib_rule_put(struct dn_fib_rule *r)
 {
 	if (atomic_dec_and_test(&r->r_clntref)) {
 		if (r->r_dead)
-			kfree(r);
+			call_rcu(&r->rcu, dn_fib_rule_put_rcu);
 		else
 			printk(KERN_DEBUG "Attempt to free alive dn_fib_rule\n");
 	}
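
The two hunks above give the write-side teardown: the rule is unlinked with hlist_del_rcu(), the final reference drop hands the kfree() to call_rcu(), and readers still walking the list under rcu_read_lock() never see freed memory. A condensed sketch of that pattern, with illustrative names rather than the ones in the patch:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_rule {
	struct hlist_node node;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_rule, rcu));
}

static void example_remove(struct example_rule *r)
{
	hlist_del_rcu(&r->node);		/* unlink; concurrent readers may still hold r */
	call_rcu(&r->rcu, example_free_rcu);	/* actual kfree() runs after a grace period */
}
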
@@ -123,7 +129,8 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct rtattr **rta = arg;
 	struct rtmsg *rtm = NLMSG_DATA(nlh);
-	struct dn_fib_rule *r, *new_r, **rp;
+	struct dn_fib_rule *r, *new_r, *last = NULL;
+	struct hlist_node *node = NULL;
 	unsigned char table_id;
 
 	if (rtm->rtm_src_len > 16 || rtm->rtm_dst_len > 16)
@@ -149,6 +156,7 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	if (!new_r)
 		return -ENOMEM;
 	memset(new_r, 0, sizeof(*new_r));
+
 	if (rta[RTA_SRC-1])
 		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
 	if (rta[RTA_DST-1])
@@ -179,27 +187,26 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		}
 	}
 
-	rp = &dn_fib_rules;
+	r = container_of(dn_fib_rules.first, struct dn_fib_rule, r_hlist);
 	if (!new_r->r_preference) {
-		r = dn_fib_rules;
-		if (r && (r = r->r_next) != NULL) {
-			rp = &dn_fib_rules->r_next;
+		if (r && r->r_hlist.next != NULL) {
+			r = container_of(r->r_hlist.next, struct dn_fib_rule, r_hlist);
 			if (r->r_preference)
 				new_r->r_preference = r->r_preference - 1;
 		}
 	}
 
-	while((r=*rp) != NULL) {
+	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
 		if (r->r_preference > new_r->r_preference)
 			break;
-		rp = &r->r_next;
+		last = r;
 	}
-
-	new_r->r_next = r;
 	atomic_inc(&new_r->r_clntref);
-	write_lock_bh(&dn_fib_rules_lock);
-	*rp = new_r;
-	write_unlock_bh(&dn_fib_rules_lock);
+
+	if (last)
+		hlist_add_after_rcu(&last->r_hlist, &new_r->r_hlist);
+	else
+		hlist_add_before_rcu(&new_r->r_hlist, &r->r_hlist);
 	return 0;
 }
 
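
The insertion hunk above keeps the rules ordered by preference: the loop remembers the last entry whose preference is not larger than the new one, then the new rule is linked in with hlist_add_after_rcu(), or with hlist_add_before_rcu() in front of the first entry when nothing smaller was found. A rough sketch of that logic under the same assumption the patch relies on (the list is never empty, because the default rule is always present; names are illustrative):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

struct example_rule {
	struct hlist_node node;
	u32 preference;
};

static struct hlist_head example_rules;	/* assumed to always hold a default entry */

static void example_insert(struct example_rule *new_r)
{
	struct example_rule *r, *last = NULL;
	struct hlist_node *pos;

	/* Older four-argument hlist_for_each_entry(), as used in this patch. */
	hlist_for_each_entry(r, pos, &example_rules, node) {
		if (r->preference > new_r->preference)
			break;
		last = r;
	}

	if (last)
		hlist_add_after_rcu(&last->node, &new_r->node);
	else
		hlist_add_before_rcu(&new_r->node, &r->node);	/* before the first entry */
}
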
@@ -208,12 +215,14 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
 {
 	struct dn_fib_rule *r, *policy;
 	struct dn_fib_table *tb;
-	dn_address saddr = flp->fld_src;
-	dn_address daddr = flp->fld_dst;
+	__le16 saddr = flp->fld_src;
+	__le16 daddr = flp->fld_dst;
+	struct hlist_node *node;
 	int err;
 
-	read_lock(&dn_fib_rules_lock);
-	for(r = dn_fib_rules; r; r = r->r_next) {
+	rcu_read_lock();
+
+	hlist_for_each_entry_rcu(r, node, &dn_fib_rules, r_hlist) {
 		if (((saddr^r->r_src) & r->r_srcmask) ||
 		    ((daddr^r->r_dst) & r->r_dstmask) ||
 #ifdef CONFIG_DECNET_ROUTE_FWMARK
@@ -228,14 +237,14 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
 			policy = r;
 			break;
 		case RTN_UNREACHABLE:
-			read_unlock(&dn_fib_rules_lock);
+			rcu_read_unlock();
 			return -ENETUNREACH;
 		default:
 		case RTN_BLACKHOLE:
-			read_unlock(&dn_fib_rules_lock);
+			rcu_read_unlock();
 			return -EINVAL;
 		case RTN_PROHIBIT:
-			read_unlock(&dn_fib_rules_lock);
+			rcu_read_unlock();
 			return -EACCES;
 		}
 
@@ -246,20 +255,20 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
 			res->r = policy;
 			if (policy)
 				atomic_inc(&policy->r_clntref);
-			read_unlock(&dn_fib_rules_lock);
+			rcu_read_unlock();
 			return 0;
 		}
 		if (err < 0 && err != -EAGAIN) {
-			read_unlock(&dn_fib_rules_lock);
+			rcu_read_unlock();
 			return err;
 		}
 	}
 
-	read_unlock(&dn_fib_rules_lock);
+	rcu_read_unlock();
 	return -ESRCH;
 }
 
-unsigned dnet_addr_type(__u16 addr)
+unsigned dnet_addr_type(__le16 addr)
 {
 	struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
 	struct dn_fib_res res;
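
On the read side, the hunks above swap the reader/writer lock for rcu_read_lock() and walk the list with hlist_for_each_entry_rcu(); a matched entry is pinned with its reference count before the read-side critical section ends, as dn_fib_lookup() does with r_clntref. A minimal sketch of that lookup shape, with illustrative names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct example_rule {
	struct hlist_node node;
	atomic_t refcnt;
	u32 preference;
};

static struct example_rule *example_lookup(struct hlist_head *head, u32 want)
{
	struct example_rule *r;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(r, pos, head, node) {
		if (r->preference != want)
			continue;
		atomic_inc(&r->refcnt);	/* pin before leaving the RCU read side */
		rcu_read_unlock();
		return r;
	}
	rcu_read_unlock();
	return NULL;
}
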
@@ -277,7 +286,7 @@ unsigned dnet_addr_type(__u16 addr)
 	return ret;
 }
 
-__u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags)
+__le16 dn_fib_rules_policy(__le16 saddr, struct dn_fib_res *res, unsigned *flags)
 {
 	struct dn_fib_rule *r = res->r;
 
@@ -297,27 +306,23 @@ __u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags)
 
 static void dn_fib_rules_detach(struct net_device *dev)
 {
+	struct hlist_node *node;
 	struct dn_fib_rule *r;
 
-	for(r = dn_fib_rules; r; r = r->r_next) {
-		if (r->r_ifindex == dev->ifindex) {
-			write_lock_bh(&dn_fib_rules_lock);
+	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
+		if (r->r_ifindex == dev->ifindex)
 			r->r_ifindex = -1;
-			write_unlock_bh(&dn_fib_rules_lock);
-		}
 	}
 }
 
 static void dn_fib_rules_attach(struct net_device *dev)
 {
+	struct hlist_node *node;
 	struct dn_fib_rule *r;
 
-	for(r = dn_fib_rules; r; r = r->r_next) {
-		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0) {
-			write_lock_bh(&dn_fib_rules_lock);
+	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
+		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
 			r->r_ifindex = dev->ifindex;
-			write_unlock_bh(&dn_fib_rules_lock);
-		}
 	}
 }
 
@@ -387,18 +392,20 @@ rtattr_failure:
 
 int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	int idx;
+	int idx = 0;
 	int s_idx = cb->args[0];
 	struct dn_fib_rule *r;
+	struct hlist_node *node;
 
-	read_lock(&dn_fib_rules_lock);
-	for(r = dn_fib_rules, idx = 0; r; r = r->r_next, idx++) {
+	rcu_read_lock();
+	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
 		if (idx < s_idx)
 			continue;
 		if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
 			break;
+		idx++;
 	}
-	read_unlock(&dn_fib_rules_lock);
+	rcu_read_unlock();
 	cb->args[0] = idx;
 
 	return skb->len;
@@ -406,6 +413,8 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 
 void __init dn_fib_rules_init(void)
 {
+	INIT_HLIST_HEAD(&dn_fib_rules);
+	hlist_add_head(&default_rule.r_hlist, &dn_fib_rules);
 	register_netdevice_notifier(&dn_fib_rules_notifier);
 }
 
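
The init hunk replaces the old static head pointer (which used to point straight at default_rule) with an hlist that is initialised at boot and seeded with the default rule, so every later traversal finds at least one entry. A small sketch of the same setup with illustrative names:

#include <linux/init.h>
#include <linux/list.h>

struct example_rule {
	struct hlist_node node;
};

static struct example_rule example_default;
static struct hlist_head example_rules;

static void __init example_rules_init(void)
{
	INIT_HLIST_HEAD(&example_rules);
	/* No readers can run this early, so the plain, non-RCU
	 * hlist_add_head() is enough, mirroring dn_fib_rules_init() above. */
	hlist_add_head(&example_default.node, &example_rules);
}
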