Diffstat (limited to 'net/ipv4/fib_semantics.c')
-rw-r--r--    net/ipv4/fib_semantics.c    258
1 file changed, 135 insertions(+), 123 deletions(-)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3df1b7..641a5a2a9f9c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
 static DEFINE_SPINLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
 static unsigned int fib_info_cnt;
 
 #define DEVINDEX_HASHBITS 8
@@ -90,11 +90,7 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define endfor_nexthops(fi) }
 
 
-static const struct
-{
-        int     error;
-        u8      scope;
-} fib_props[RTN_MAX + 1] = {
+const struct fib_prop fib_props[RTN_MAX + 1] = {
         [RTN_UNSPEC] = {
                 .error  = 0,
                 .scope  = RT_SCOPE_NOWHERE,
@@ -152,6 +148,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
 {
         struct fib_info *fi = container_of(head, struct fib_info, rcu);
 
+        if (fi->fib_metrics != (u32 *) dst_default_metrics)
+                kfree(fi->fib_metrics);
         kfree(fi);
 }
 
@@ -200,7 +198,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                     nh->nh_weight != onh->nh_weight ||
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                     nh->nh_tclassid != onh->nh_tclassid ||
 #endif
                     ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,10 +219,10 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
 
 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 {
-        unsigned int mask = (fib_hash_size - 1);
+        unsigned int mask = (fib_info_hash_size - 1);
         unsigned int val = fi->fib_nhs;
 
-        val ^= fi->fib_protocol;
+        val ^= (fi->fib_protocol << 8) | fi->fib_scope;
         val ^= (__force u32)fi->fib_prefsrc;
         val ^= fi->fib_priority;
         for_nexthops(fi) {
@@ -250,10 +248,11 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
                 if (fi->fib_nhs != nfi->fib_nhs)
                         continue;
                 if (nfi->fib_protocol == fi->fib_protocol &&
+                    nfi->fib_scope == fi->fib_scope &&
                     nfi->fib_prefsrc == fi->fib_prefsrc &&
                     nfi->fib_priority == fi->fib_priority &&
                     memcmp(nfi->fib_metrics, fi->fib_metrics,
-                           sizeof(fi->fib_metrics)) == 0 &&
+                           sizeof(u32) * RTAX_MAX) == 0 &&
                     ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
                     (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
                         return fi;
@@ -330,7 +329,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
                 goto errout;
 
         err = fib_dump_info(skb, info->pid, seq, event, tb_id,
-                            fa->fa_type, fa->fa_scope, key, dst_len,
+                            fa->fa_type, key, dst_len,
                             fa->fa_tos, fa->fa_info, nlm_flags);
         if (err < 0) {
                 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
@@ -422,7 +421,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 
                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                         nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                         nla = nla_find(attrs, attrlen, RTA_FLOW);
                         nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
@@ -476,7 +475,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                         if (nla && nla_get_be32(nla) != nh->nh_gw)
                                 return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                         nla = nla_find(attrs, attrlen, RTA_FLOW);
                         if (nla && nla_get_u32(nla) != nh->nh_tclassid)
                                 return 1;
@@ -562,16 +561,16 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                 }
                 rcu_read_lock();
                 {
-                        struct flowi fl = {
-                                .fl4_dst = nh->nh_gw,
-                                .fl4_scope = cfg->fc_scope + 1,
-                                .oif = nh->nh_oif,
+                        struct flowi4 fl4 = {
+                                .daddr = nh->nh_gw,
+                                .flowi4_scope = cfg->fc_scope + 1,
+                                .flowi4_oif = nh->nh_oif,
                         };
 
                         /* It is not necessary, but requires a bit of thinking */
-                        if (fl.fl4_scope < RT_SCOPE_LINK)
-                                fl.fl4_scope = RT_SCOPE_LINK;
-                        err = fib_lookup(net, &fl, &res);
+                        if (fl4.flowi4_scope < RT_SCOPE_LINK)
+                                fl4.flowi4_scope = RT_SCOPE_LINK;
+                        err = fib_lookup(net, &fl4, &res);
                         if (err) {
                                 rcu_read_unlock();
                                 return err;
@@ -613,14 +612,14 @@ out:
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
-        unsigned int mask = (fib_hash_size - 1);
+        unsigned int mask = (fib_info_hash_size - 1);
 
         return ((__force u32)val ^
                 ((__force u32)val >> 7) ^
                 ((__force u32)val >> 14)) & mask;
 }
 
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
 {
         if (bytes <= PAGE_SIZE)
                 return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +629,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
                                          get_order(bytes));
 }
 
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
 {
         if (!hash)
                 return;
@@ -641,18 +640,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
         free_pages((unsigned long) hash, get_order(bytes));
 }
 
-static void fib_hash_move(struct hlist_head *new_info_hash,
-                          struct hlist_head *new_laddrhash,
-                          unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+                               struct hlist_head *new_laddrhash,
+                               unsigned int new_size)
 {
         struct hlist_head *old_info_hash, *old_laddrhash;
-        unsigned int old_size = fib_hash_size;
+        unsigned int old_size = fib_info_hash_size;
         unsigned int i, bytes;
 
         spin_lock_bh(&fib_info_lock);
         old_info_hash = fib_info_hash;
         old_laddrhash = fib_info_laddrhash;
-        fib_hash_size = new_size;
+        fib_info_hash_size = new_size;
 
         for (i = 0; i < old_size; i++) {
                 struct hlist_head *head = &fib_info_hash[i];
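fib_info_hash_move() (renamed above from fib_hash_move()) publishes the larger tables under fib_info_lock and rehashes every fib_info into them; a later hunk has fib_create_info() double the table size whenever fib_info_cnt reaches fib_info_hash_size. A condensed user-space sketch of that grow-and-rehash idea over plain singly linked buckets (illustrative only, not the kernel's hlist/spinlock code):

    #include <stdlib.h>

    struct node {
        struct node *next;
        unsigned int key;
    };

    struct table {
        struct node **buckets;
        unsigned int size;      /* always a power of two */
        unsigned int count;
    };

    /* Move every entry into a table twice as large, then free the old
     * bucket array - the shape of fib_info_hash_move(), minus the lock
     * and the second (laddr) hash. */
    static int table_grow(struct table *t)
    {
        unsigned int new_size = t->size ? t->size << 1 : 1;
        struct node **nb = calloc(new_size, sizeof(*nb));
        unsigned int i;

        if (!nb)
            return -1;
        for (i = 0; i < t->size; i++) {
            struct node *n = t->buckets[i];

            while (n) {
                struct node *next = n->next;
                unsigned int b = n->key & (new_size - 1);

                n->next = nb[b];
                nb[b] = n;
                n = next;
            }
        }
        free(t->buckets);
        t->buckets = nb;
        t->size = new_size;
        return 0;
    }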
@@ -693,8 +692,18 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
         spin_unlock_bh(&fib_info_lock);
 
         bytes = old_size * sizeof(struct hlist_head *);
-        fib_hash_free(old_info_hash, bytes);
-        fib_hash_free(old_laddrhash, bytes);
+        fib_info_hash_free(old_info_hash, bytes);
+        fib_info_hash_free(old_laddrhash, bytes);
+}
+
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
+{
+        nh->nh_saddr = inet_select_addr(nh->nh_dev,
+                                        nh->nh_gw,
+                                        nh->nh_parent->fib_scope);
+        nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+
+        return nh->nh_saddr;
 }
 
 struct fib_info *fib_create_info(struct fib_config *cfg)
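fib_info_update_nh_saddr(), added just above, caches the preferred source address in the nexthop together with a snapshot of the per-namespace dev_addr_genid counter, so later users can tell whether the cached address may have gone stale. A minimal stand-alone sketch of that cache-plus-generation-counter pattern (user-space C with invented names, not kernel API):

    #include <stdatomic.h>

    /* Bumped whenever the set of local addresses changes; plays the role
     * of net->ipv4.dev_addr_genid in the patch above. */
    static atomic_uint addr_genid;

    struct nh_cache {
        unsigned int saddr;     /* cached result */
        unsigned int genid;     /* generation it was computed against */
    };

    static unsigned int select_addr(void)
    {
        return 0x0a000001;      /* stand-in for inet_select_addr() */
    }

    static unsigned int nh_cached_saddr(struct nh_cache *c)
    {
        unsigned int now = atomic_load(&addr_genid);

        if (c->genid != now) {  /* stale: recompute and re-stamp */
            c->saddr = select_addr();
            c->genid = now;
        }
        return c->saddr;
    }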
@@ -705,6 +714,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
         int nhs = 1;
         struct net *net = cfg->fc_nlinfo.nl_net;
 
+        if (cfg->fc_type > RTN_MAX)
+                goto err_inval;
+
         /* Fast check to catch the most weird cases */
         if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                 goto err_inval;
@@ -718,8 +730,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 #endif
 
         err = -ENOBUFS;
-        if (fib_info_cnt >= fib_hash_size) {
-                unsigned int new_size = fib_hash_size << 1;
+        if (fib_info_cnt >= fib_info_hash_size) {
+                unsigned int new_size = fib_info_hash_size << 1;
                 struct hlist_head *new_info_hash;
                 struct hlist_head *new_laddrhash;
                 unsigned int bytes;
@@ -727,25 +739,32 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                 if (!new_size)
                         new_size = 1;
                 bytes = new_size * sizeof(struct hlist_head *);
-                new_info_hash = fib_hash_alloc(bytes);
-                new_laddrhash = fib_hash_alloc(bytes);
+                new_info_hash = fib_info_hash_alloc(bytes);
+                new_laddrhash = fib_info_hash_alloc(bytes);
                 if (!new_info_hash || !new_laddrhash) {
-                        fib_hash_free(new_info_hash, bytes);
-                        fib_hash_free(new_laddrhash, bytes);
+                        fib_info_hash_free(new_info_hash, bytes);
+                        fib_info_hash_free(new_laddrhash, bytes);
                 } else
-                        fib_hash_move(new_info_hash, new_laddrhash, new_size);
+                        fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
 
-                if (!fib_hash_size)
+                if (!fib_info_hash_size)
                         goto failure;
         }
 
         fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
         if (fi == NULL)
                 goto failure;
+        if (cfg->fc_mx) {
+                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+                if (!fi->fib_metrics)
+                        goto failure;
+        } else
+                fi->fib_metrics = (u32 *) dst_default_metrics;
         fib_info_cnt++;
 
         fi->fib_net = hold_net(net);
         fi->fib_protocol = cfg->fc_protocol;
+        fi->fib_scope = cfg->fc_scope;
         fi->fib_flags = cfg->fc_flags;
         fi->fib_priority = cfg->fc_priority;
         fi->fib_prefsrc = cfg->fc_prefsrc;
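In the hunk above, fib_create_info() allocates a private RTAX_MAX-sized metrics array only when the route actually carries metrics (cfg->fc_mx); otherwise fib_metrics points at the shared read-only dst_default_metrics, and free_fib_info_rcu() earlier in this diff frees the array only when it is not that shared default. A rough user-space sketch of the same share-or-allocate idiom (illustrative names, not kernel code):

    #include <stdlib.h>
    #include <string.h>

    #define N_METRICS 16        /* stands in for RTAX_MAX */

    /* Shared all-zero default, analogous to dst_default_metrics. */
    static const unsigned int default_metrics[N_METRICS];

    struct route {
        unsigned int *metrics;  /* shared default or private copy */
    };

    static int route_init_metrics(struct route *r, const unsigned int *cfg)
    {
        if (!cfg) {
            /* No explicit metrics: share the read-only default. */
            r->metrics = (unsigned int *)default_metrics;
            return 0;
        }
        r->metrics = calloc(N_METRICS, sizeof(*r->metrics));
        if (!r->metrics)
            return -1;
        memcpy(r->metrics, cfg, N_METRICS * sizeof(*r->metrics));
        return 0;
    }

    static void route_release(struct route *r)
    {
        /* Only private copies may be freed. */
        if (r->metrics != (unsigned int *)default_metrics)
            free(r->metrics);
    }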
@@ -779,7 +798,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                         goto err_inval;
                 if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
                         goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                 if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
                         goto err_inval;
 #endif
@@ -792,7 +811,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                 nh->nh_oif = cfg->fc_oif;
                 nh->nh_gw = cfg->fc_gw;
                 nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                 nh->nh_tclassid = cfg->fc_flow;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -804,6 +823,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
                         goto err_inval;
                 goto link_it;
+        } else {
+                switch (cfg->fc_type) {
+                case RTN_UNICAST:
+                case RTN_LOCAL:
+                case RTN_BROADCAST:
+                case RTN_ANYCAST:
+                case RTN_MULTICAST:
+                        break;
+                default:
+                        goto err_inval;
+                }
         }
 
         if (cfg->fc_scope > RT_SCOPE_HOST)
@@ -835,6 +865,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                                 goto err_inval;
         }
 
+        change_nexthops(fi) {
+                fib_info_update_nh_saddr(net, nexthop_nh);
+        } endfor_nexthops(fi)
+
 link_it:
         ofi = fib_find_info(fi);
         if (ofi) {
@@ -880,86 +914,8 @@ failure:
         return ERR_PTR(err);
 }
 
-/* Note! fib_semantic_match intentionally uses RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-                       struct fib_result *res, int prefixlen, int fib_flags)
-{
-        struct fib_alias *fa;
-        int nh_sel = 0;
-
-        list_for_each_entry_rcu(fa, head, fa_list) {
-                int err;
-
-                if (fa->fa_tos &&
-                    fa->fa_tos != flp->fl4_tos)
-                        continue;
-
-                if (fa->fa_scope < flp->fl4_scope)
-                        continue;
-
-                fib_alias_accessed(fa);
-
-                err = fib_props[fa->fa_type].error;
-                if (err == 0) {
-                        struct fib_info *fi = fa->fa_info;
-
-                        if (fi->fib_flags & RTNH_F_DEAD)
-                                continue;
-
-                        switch (fa->fa_type) {
-                        case RTN_UNICAST:
-                        case RTN_LOCAL:
-                        case RTN_BROADCAST:
-                        case RTN_ANYCAST:
-                        case RTN_MULTICAST:
-                                for_nexthops(fi) {
-                                        if (nh->nh_flags & RTNH_F_DEAD)
-                                                continue;
-                                        if (!flp->oif || flp->oif == nh->nh_oif)
-                                                break;
-                                }
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-                                if (nhsel < fi->fib_nhs) {
-                                        nh_sel = nhsel;
-                                        goto out_fill_res;
-                                }
-#else
-                                if (nhsel < 1)
-                                        goto out_fill_res;
-#endif
-                                endfor_nexthops(fi);
-                                continue;
-
-                        default:
-                                pr_warning("fib_semantic_match bad type %#x\n",
-                                           fa->fa_type);
-                                return -EINVAL;
-                        }
-                }
-                return err;
-        }
-        return 1;
-
-out_fill_res:
-        res->prefixlen = prefixlen;
-        res->nh_sel = nh_sel;
-        res->type = fa->fa_type;
-        res->scope = fa->fa_scope;
-        res->fi = fa->fa_info;
-        if (!(fib_flags & FIB_LOOKUP_NOREF))
-                atomic_inc(&res->fi->fib_clntref);
-        return 0;
-}
-
-/* Find appropriate source address to this destination */
-
-__be32 __fib_res_prefsrc(struct fib_result *res)
-{
-        return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
-}
-
 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-                  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
+                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                   struct fib_info *fi, unsigned int flags)
 {
         struct nlmsghdr *nlh;
@@ -981,7 +937,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
         NLA_PUT_U32(skb, RTA_TABLE, tb_id);
         rtm->rtm_type = type;
         rtm->rtm_flags = fi->fib_flags;
-        rtm->rtm_scope = scope;
+        rtm->rtm_scope = fi->fib_scope;
         rtm->rtm_protocol = fi->fib_protocol;
 
         if (rtm->rtm_dst_len)
@@ -1002,7 +958,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                 if (fi->fib_nh->nh_oif)
                         NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                 if (fi->fib_nh[0].nh_tclassid)
                         NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
 #endif
@@ -1027,7 +983,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
                         if (nh->nh_gw)
                                 NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
                         if (nh->nh_tclassid)
                                 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
 #endif
@@ -1125,6 +1081,62 @@ int fib_sync_down_dev(struct net_device *dev, int force)
         return ret;
 }
 
+/* Must be invoked inside of an RCU protected region. */
+void fib_select_default(struct fib_result *res)
+{
+        struct fib_info *fi = NULL, *last_resort = NULL;
+        struct list_head *fa_head = res->fa_head;
+        struct fib_table *tb = res->table;
+        int order = -1, last_idx = -1;
+        struct fib_alias *fa;
+
+        list_for_each_entry_rcu(fa, fa_head, fa_list) {
+                struct fib_info *next_fi = fa->fa_info;
+
+                if (next_fi->fib_scope != res->scope ||
+                    fa->fa_type != RTN_UNICAST)
+                        continue;
+
+                if (next_fi->fib_priority > res->fi->fib_priority)
+                        break;
+                if (!next_fi->fib_nh[0].nh_gw ||
+                    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+                        continue;
+
+                fib_alias_accessed(fa);
+
+                if (fi == NULL) {
+                        if (next_fi != res->fi)
+                                break;
+                } else if (!fib_detect_death(fi, order, &last_resort,
+                                             &last_idx, tb->tb_default)) {
+                        fib_result_assign(res, fi);
+                        tb->tb_default = order;
+                        goto out;
+                }
+                fi = next_fi;
+                order++;
+        }
+
+        if (order <= 0 || fi == NULL) {
+                tb->tb_default = -1;
+                goto out;
+        }
+
+        if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+                              tb->tb_default)) {
+                fib_result_assign(res, fi);
+                tb->tb_default = order;
+                goto out;
+        }
+
+        if (last_idx >= 0)
+                fib_result_assign(res, last_resort);
+        tb->tb_default = last_idx;
+out:
+        return;
+}
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
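fib_select_default(), added above, walks the aliases of a default route under RCU and, when the gateway currently in use looks dead according to fib_detect_death(), re-points the result at an earlier live candidate, keeping a "last resort" entry if nothing better is found. A much-simplified stand-alone sketch of that first-alive-else-last-resort selection (it deliberately omits the real function's scope, priority and ordering checks):

    #include <stdbool.h>
    #include <stddef.h>

    struct gateway {
        unsigned int addr;
        bool alive;     /* stand-in for !fib_detect_death() */
    };

    /* Return the first gateway that still looks alive; if none does,
     * fall back to the last candidate examined (the "last resort"). */
    static const struct gateway *pick_default(const struct gateway *tbl,
                                              size_t n)
    {
        const struct gateway *last_resort = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (tbl[i].alive)
                return &tbl[i];
            last_resort = &tbl[i];
        }
        return last_resort;
    }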
@@ -1189,7 +1201,7 @@ int fib_sync_up(struct net_device *dev)
  * The algorithm is suboptimal, but it provides really
  * fair weighted route distribution.
  */
-void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
+void fib_select_multipath(struct fib_result *res)
 {
         struct fib_info *fi = res->fi;
         int w;
