Diffstat (limited to 'net/decnet/dn_table.c')
-rw-r--r--  net/decnet/dn_table.c | 163
1 file changed, 111 insertions, 52 deletions
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index e926c952e363..317904bb5896 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -30,6 +30,7 @@
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/flow.h>
+#include <net/fib_rules.h>
 #include <net/dn.h>
 #include <net/dn_route.h>
 #include <net/dn_fib.h>
@@ -74,9 +75,9 @@ for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)
 for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
 
 #define RT_TABLE_MIN 1
-
+#define DN_FIB_TABLE_HASHSZ 256
+static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
-struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
 
 static kmem_cache_t *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
@@ -263,7 +264,7 @@ static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern
 }
 
 static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-			u8 tb_id, u8 type, u8 scope, void *dst, int dst_len,
+			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
 			struct dn_fib_info *fi, unsigned int flags)
 {
 	struct rtmsg *rtm;
@@ -277,6 +278,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	rtm->rtm_src_len = 0;
 	rtm->rtm_tos = 0;
 	rtm->rtm_table = tb_id;
+	RTA_PUT_U32(skb, RTA_TABLE, tb_id);
 	rtm->rtm_flags = fi->fib_flags;
 	rtm->rtm_scope = scope;
 	rtm->rtm_type = type;
@@ -326,29 +328,29 @@ rtattr_failure:
 }
 
 
-static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, int tb_id,
+static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
 {
	struct sk_buff *skb;
	u32 pid = req ? req->pid : 0;
-	int size = NLMSG_SPACE(sizeof(struct rtmsg) + 256);
+	int err = -ENOBUFS;
 
-	skb = alloc_skb(size, GFP_KERNEL);
-	if (!skb)
-		return;
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto errout;
 
-	if (dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
+	err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
			f->fn_type, f->fn_scope, &f->fn_key, z,
-			DN_FIB_INFO(f), 0) < 0) {
+			DN_FIB_INFO(f), 0);
+	if (err < 0) {
		kfree_skb(skb);
-		return;
+		goto errout;
	}
-	NETLINK_CB(skb).dst_group = RTNLGRP_DECnet_ROUTE;
-	if (nlh->nlmsg_flags & NLM_F_ECHO)
-		atomic_inc(&skb->users);
-	netlink_broadcast(rtnl, skb, pid, RTNLGRP_DECnet_ROUTE, GFP_KERNEL);
-	if (nlh->nlmsg_flags & NLM_F_ECHO)
-		netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
+
+	err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
 }
 
 static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
@@ -359,7 +361,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
 {
	int i, s_i;
 
-	s_i = cb->args[3];
+	s_i = cb->args[4];
	for(i = 0; f; i++, f = f->fn_next) {
		if (i < s_i)
			continue;
@@ -372,11 +374,11 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
				f->fn_scope, &f->fn_key, dz->dz_order,
				f->fn_info, NLM_F_MULTI) < 0) {
-			cb->args[3] = i;
+			cb->args[4] = i;
			return -1;
		}
	}
-	cb->args[3] = i;
+	cb->args[4] = i;
	return skb->len;
 }
 
@@ -387,20 +389,20 @@ static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
 {
	int h, s_h;
 
-	s_h = cb->args[2];
+	s_h = cb->args[3];
	for(h = 0; h < dz->dz_divisor; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
-			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));
+			memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
		if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
			continue;
		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
-			cb->args[2] = h;
+			cb->args[3] = h;
			return -1;
		}
	}
-	cb->args[2] = h;
+	cb->args[3] = h;
	return skb->len;
 }
 
@@ -411,26 +413,63 @@ static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
	struct dn_zone *dz;
	struct dn_hash *table = (struct dn_hash *)tb->data;
 
-	s_m = cb->args[1];
+	s_m = cb->args[2];
	read_lock(&dn_fib_tables_lock);
	for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
-			memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0]));
+			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));
 
		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
-			cb->args[1] = m;
+			cb->args[2] = m;
			read_unlock(&dn_fib_tables_lock);
			return -1;
		}
	}
	read_unlock(&dn_fib_tables_lock);
-	cb->args[1] = m;
+	cb->args[2] = m;
 
	return skb->len;
 }
 
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	unsigned int h, s_h;
+	unsigned int e = 0, s_e;
+	struct dn_fib_table *tb;
+	struct hlist_node *node;
+	int dumped = 0;
+
+	if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
+	    ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
+		return dn_cache_dump(skb, cb);
+
+	s_h = cb->args[0];
+	s_e = cb->args[1];
+
+	for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
+		e = 0;
+		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
+			if (e < s_e)
+				goto next;
+			if (dumped)
+				memset(&cb->args[2], 0, sizeof(cb->args) -
+						2 * sizeof(cb->args[0]));
+			if (tb->dump(tb, skb, cb) < 0)
+				goto out;
+			dumped = 1;
+next:
+			e++;
+		}
+	}
+out:
+	cb->args[1] = e;
+	cb->args[0] = h;
+
+	return skb->len;
+}
+
 static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req)
 {
	struct dn_hash *table = (struct dn_hash *)tb->data;
@@ -739,9 +778,11 @@ out:
 }
 
 
-struct dn_fib_table *dn_fib_get_table(int n, int create)
+struct dn_fib_table *dn_fib_get_table(u32 n, int create)
 {
	struct dn_fib_table *t;
+	struct hlist_node *node;
+	unsigned int h;
 
	if (n < RT_TABLE_MIN)
		return NULL;
@@ -749,8 +790,15 @@ struct dn_fib_table *dn_fib_get_table(int n, int create)
	if (n > RT_TABLE_MAX)
		return NULL;
 
-	if (dn_fib_tables[n])
-		return dn_fib_tables[n];
+	h = n & (DN_FIB_TABLE_HASHSZ - 1);
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
+		if (t->n == n) {
+			rcu_read_unlock();
+			return t;
+		}
+	}
+	rcu_read_unlock();
 
	if (!create)
		return NULL;
@@ -771,33 +819,37 @@ struct dn_fib_table *dn_fib_get_table(int n, int create)
	t->flush = dn_fib_table_flush;
	t->dump = dn_fib_table_dump;
	memset(t->data, 0, sizeof(struct dn_hash));
-	dn_fib_tables[n] = t;
+	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
 
	return t;
 }
 
-static void dn_fib_del_tree(int n)
-{
-	struct dn_fib_table *t;
-
-	write_lock(&dn_fib_tables_lock);
-	t = dn_fib_tables[n];
-	dn_fib_tables[n] = NULL;
-	write_unlock(&dn_fib_tables_lock);
-
-	kfree(t);
-}
-
 struct dn_fib_table *dn_fib_empty_table(void)
 {
-	int id;
+	u32 id;
 
	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
-		if (dn_fib_tables[id] == NULL)
+		if (dn_fib_get_table(id, 0) == NULL)
			return dn_fib_get_table(id, 1);
	return NULL;
 }
 
+void dn_fib_flush(void)
+{
+	int flushed = 0;
+	struct dn_fib_table *tb;
+	struct hlist_node *node;
+	unsigned int h;
+
+	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
+		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
+			flushed += tb->flush(tb);
+	}
+
+	if (flushed)
+		dn_rt_cache_flush(-1);
+}
+
 void __init dn_fib_table_init(void)
 {
	dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
@@ -808,10 +860,17 @@ void __init dn_fib_table_init(void)
 
 void __exit dn_fib_table_cleanup(void)
 {
-	int i;
-
-	for (i = RT_TABLE_MIN; i <= RT_TABLE_MAX; ++i)
-		dn_fib_del_tree(i);
+	struct dn_fib_table *t;
+	struct hlist_node *node, *next;
+	unsigned int h;
 
-	return;
+	write_lock(&dn_fib_tables_lock);
+	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
+		hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
+					hlist) {
+			hlist_del(&t->hlist);
+			kfree(t);
+		}
+	}
+	write_unlock(&dn_fib_tables_lock);
 }
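
The core of the change above is replacing the flat dn_fib_tables[RT_TABLE_MAX + 1] array with a 256-bucket hash of hlists keyed by table id, while dn_fib_dump_info carries the id as a u32 and also reports it via the RTA_TABLE attribute. As a rough standalone sketch of that lookup/insert pattern (userspace C with simplified, hypothetical names; the kernel code above uses hlist_head, RCU and dn_fib_tables_lock instead):

/*
 * Sketch only: fixed-size hash of singly linked lists keyed by
 * (table id & (HASHSZ - 1)), mirroring dn_fib_get_table()'s
 * lookup-or-create behaviour.  Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define TABLE_HASHSZ 256

struct fib_table {
	unsigned int n;			/* table id */
	struct fib_table *next;		/* next table in the same bucket */
};

static struct fib_table *table_hash[TABLE_HASHSZ];

static struct fib_table *get_table(unsigned int n, int create)
{
	unsigned int h = n & (TABLE_HASHSZ - 1);
	struct fib_table *t;

	/* walk only the bucket this id hashes to */
	for (t = table_hash[h]; t != NULL; t = t->next)
		if (t->n == n)
			return t;

	if (!create)
		return NULL;

	t = calloc(1, sizeof(*t));
	if (t == NULL)
		return NULL;
	t->n = n;
	t->next = table_hash[h];	/* insert at the head of the bucket */
	table_hash[h] = t;
	return t;
}

int main(void)
{
	get_table(255, 1);
	printf("table 255: %s\n", get_table(255, 0) ? "found" : "missing");
	printf("table 1:   %s\n", get_table(1, 0) ? "found" : "missing");
	return 0;
}

Dump and cleanup then become a walk over all DN_FIB_TABLE_HASHSZ buckets, which is what the new dn_fib_dump(), dn_fib_flush() and dn_fib_table_cleanup() in the patch do.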