| author | Patrick McHardy <kaber@trash.net> | 2006-03-23 04:16:06 -0500 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2006-03-23 04:16:06 -0500 |
| commit | a5cdc030030ef5a16e48aebeb65067bdc3120899 (patch) | |
| tree | 36524250e8564ae4e7f891662dace65fdacff521 /net/ipv4 | |
| parent | 5e2e71141ca93a3e2543966ced6a6077d4fb8fd8 (diff) | |
[IPV4]: Add fib rule netlink notifications
To really make sense of route notifications in the presence of
multiple tables, userspace also needs to be notified about routing
rule updates. Notifications are sent to the so far unused
RTNLGRP_NOP1 group (now RTNLGRP_IPV4_RULE).
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/fib_rules.c | 27 |
1 file changed, 24 insertions(+), 3 deletions(-)
```diff
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 768e8f5d7daa..ec566f3e66c7 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -104,6 +104,8 @@ static struct hlist_head fib_rules;
 
 /* writer func called from netlink -- rtnl_sem hold*/
 
+static void rtmsg_rule(int, struct fib_rule *);
+
 int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
 	struct rtattr **rta = arg;
@@ -131,6 +133,7 @@ int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 
 			hlist_del_rcu(&r->hlist);
 			r->r_dead = 1;
+			rtmsg_rule(RTM_DELRULE, r);
 			fib_rule_put(r);
 			err = 0;
 			break;
@@ -253,6 +256,7 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	else
 		hlist_add_before_rcu(&new_r->hlist, &r->hlist);
 
+	rtmsg_rule(RTM_NEWRULE, new_r);
 	return 0;
 }
 
@@ -382,14 +386,14 @@ static struct notifier_block fib_rules_notifier = {
 
 static __inline__ int inet_fill_rule(struct sk_buff *skb,
 				     struct fib_rule *r,
-				     struct netlink_callback *cb,
+				     u32 pid, u32 seq, int event,
 				     unsigned int flags)
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
 
-	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
+	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
 	rtm = NLMSG_DATA(nlh);
 	rtm->rtm_family = AF_INET;
 	rtm->rtm_dst_len = r->r_dst_len;
@@ -430,6 +434,21 @@ rtattr_failure:
 
 /* callers should hold rtnl semaphore */
 
+static void rtmsg_rule(int event, struct fib_rule *r)
+{
+	int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
+	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
+
+	if (!skb)
+		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
+	else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
+		kfree_skb(skb);
+		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
+	} else {
+		netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
+	}
+}
+
 int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int idx = 0;
@@ -442,7 +461,9 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 
 		if (idx < s_idx)
 			continue;
-		if (inet_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
+		if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
+				   cb->nlh->nlmsg_seq,
+				   RTM_NEWRULE, NLM_F_MULTI) < 0)
 			break;
 		idx++;
 	}
```
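For context, the sketch below (not part of the patch) shows how a userspace routing daemon could consume these notifications: open a NETLINK_ROUTE socket, join the RTMGRP_IPV4_RULE multicast group (the legacy bitmask alias of the RTNLGRP_IPV4_RULE group that rtmsg_rule() broadcasts to), and read RTM_NEWRULE/RTM_DELRULE messages. Only the uapi names come from the kernel headers; buffer size, output, and error handling are illustrative.

```c
/* Hypothetical userspace listener for IPv4 rule notifications.
 * Only the uapi names (NETLINK_ROUTE, RTMGRP_IPV4_RULE, RTM_NEWRULE,
 * RTM_DELRULE, struct rtmsg) come from the kernel; the rest is a sketch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl addr;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	/* Bitmask form of RTNLGRP_IPV4_RULE, the group rtmsg_rule() broadcasts to. */
	addr.nl_groups = RTMGRP_IPV4_RULE;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len <= 0)
			break;
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			struct rtmsg *rtm = NLMSG_DATA(nlh);

			if (nlh->nlmsg_type == RTM_NEWRULE)
				printf("rule added (table %u)\n", rtm->rtm_table);
			else if (nlh->nlmsg_type == RTM_DELRULE)
				printf("rule deleted (table %u)\n", rtm->rtm_table);
		}
	}
	close(fd);
	return 0;
}
```

Rule attributes (e.g. RTA_PRIORITY, RTA_DST, RTA_IIF) follow the struct rtmsg in each message and can be walked with the standard RTA_OK()/RTA_NEXT() macros.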