Diffstat (limited to 'net/ipv4/fib_semantics.c')
-rw-r--r--  net/ipv4/fib_semantics.c | 257
1 file changed, 139 insertions, 118 deletions
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3df1b7..622ac4c95026 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
 static DEFINE_SPINLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
 static unsigned int fib_info_cnt;
 
 #define DEVINDEX_HASHBITS 8
@@ -90,11 +90,7 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define endfor_nexthops(fi) }
 
 
-static const struct
-{
-	int error;
-	u8 scope;
-} fib_props[RTN_MAX + 1] = {
+const struct fib_prop fib_props[RTN_MAX + 1] = {
 	[RTN_UNSPEC] = {
 		.error	= 0,
 		.scope	= RT_SCOPE_NOWHERE,
@@ -152,6 +148,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
 {
 	struct fib_info *fi = container_of(head, struct fib_info, rcu);
 
+	if (fi->fib_metrics != (u32 *) dst_default_metrics)
+		kfree(fi->fib_metrics);
 	kfree(fi);
 }
 
@@ -200,7 +198,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		    nh->nh_weight != onh->nh_weight ||
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
 		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +219,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
 
 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 {
-	unsigned int mask = (fib_hash_size - 1);
+	unsigned int mask = (fib_info_hash_size - 1);
 	unsigned int val = fi->fib_nhs;
 
 	val ^= fi->fib_protocol;
@@ -422,7 +420,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
 			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
@@ -476,7 +474,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			if (nla && nla_get_be32(nla) != nh->nh_gw)
 				return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
 			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
 				return 1;
@@ -562,16 +560,16 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 	}
 	rcu_read_lock();
 	{
-		struct flowi fl = {
-			.fl4_dst = nh->nh_gw,
-			.fl4_scope = cfg->fc_scope + 1,
-			.oif = nh->nh_oif,
+		struct flowi4 fl4 = {
+			.daddr = nh->nh_gw,
+			.flowi4_scope = cfg->fc_scope + 1,
+			.flowi4_oif = nh->nh_oif,
 		};
 
 		/* It is not necessary, but requires a bit of thinking */
-		if (fl.fl4_scope < RT_SCOPE_LINK)
-			fl.fl4_scope = RT_SCOPE_LINK;
-		err = fib_lookup(net, &fl, &res);
+		if (fl4.flowi4_scope < RT_SCOPE_LINK)
+			fl4.flowi4_scope = RT_SCOPE_LINK;
+		err = fib_lookup(net, &fl4, &res);
 		if (err) {
 			rcu_read_unlock();
 			return err;
@@ -613,14 +611,14 @@ out:
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
-	unsigned int mask = (fib_hash_size - 1);
+	unsigned int mask = (fib_info_hash_size - 1);
 
 	return ((__force u32)val ^
 		((__force u32)val >> 7) ^
 		((__force u32)val >> 14)) & mask;
 }
 
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
 {
 	if (bytes <= PAGE_SIZE)
 		return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +628,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
 			 get_order(bytes));
 }
 
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
 {
 	if (!hash)
 		return;
@@ -641,18 +639,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
 	free_pages((unsigned long) hash, get_order(bytes));
 }
 
-static void fib_hash_move(struct hlist_head *new_info_hash,
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
 			  struct hlist_head *new_laddrhash,
 			  unsigned int new_size)
 {
 	struct hlist_head *old_info_hash, *old_laddrhash;
-	unsigned int old_size = fib_hash_size;
+	unsigned int old_size = fib_info_hash_size;
 	unsigned int i, bytes;
 
 	spin_lock_bh(&fib_info_lock);
 	old_info_hash = fib_info_hash;
 	old_laddrhash = fib_info_laddrhash;
-	fib_hash_size = new_size;
+	fib_info_hash_size = new_size;
 
 	for (i = 0; i < old_size; i++) {
 		struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +691,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
 	spin_unlock_bh(&fib_info_lock);
 
 	bytes = old_size * sizeof(struct hlist_head *);
-	fib_hash_free(old_info_hash, bytes);
-	fib_hash_free(old_laddrhash, bytes);
+	fib_info_hash_free(old_info_hash, bytes);
+	fib_info_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -705,6 +703,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	int nhs = 1;
 	struct net *net = cfg->fc_nlinfo.nl_net;
 
+	if (cfg->fc_type > RTN_MAX)
+		goto err_inval;
+
 	/* Fast check to catch the most weird cases */
 	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
 		goto err_inval;
@@ -718,8 +719,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 #endif
 
 	err = -ENOBUFS;
-	if (fib_info_cnt >= fib_hash_size) {
-		unsigned int new_size = fib_hash_size << 1;
+	if (fib_info_cnt >= fib_info_hash_size) {
+		unsigned int new_size = fib_info_hash_size << 1;
 		struct hlist_head *new_info_hash;
 		struct hlist_head *new_laddrhash;
 		unsigned int bytes;
@@ -727,21 +728,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		if (!new_size)
 			new_size = 1;
 		bytes = new_size * sizeof(struct hlist_head *);
-		new_info_hash = fib_hash_alloc(bytes);
-		new_laddrhash = fib_hash_alloc(bytes);
+		new_info_hash = fib_info_hash_alloc(bytes);
+		new_laddrhash = fib_info_hash_alloc(bytes);
 		if (!new_info_hash || !new_laddrhash) {
-			fib_hash_free(new_info_hash, bytes);
-			fib_hash_free(new_laddrhash, bytes);
+			fib_info_hash_free(new_info_hash, bytes);
+			fib_info_hash_free(new_laddrhash, bytes);
 		} else
-			fib_hash_move(new_info_hash, new_laddrhash, new_size);
+			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
 
-		if (!fib_hash_size)
+		if (!fib_info_hash_size)
 			goto failure;
 	}
 
 	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (fi == NULL)
 		goto failure;
+	if (cfg->fc_mx) {
+		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		if (!fi->fib_metrics)
+			goto failure;
+	} else
+		fi->fib_metrics = (u32 *) dst_default_metrics;
 	fib_info_cnt++;
 
 	fi->fib_net = hold_net(net);
@@ -779,7 +786,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 			goto err_inval;
 		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
 			goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
 			goto err_inval;
 #endif
@@ -792,7 +799,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		nh->nh_oif = cfg->fc_oif;
 		nh->nh_gw = cfg->fc_gw;
 		nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		nh->nh_tclassid = cfg->fc_flow;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -804,6 +811,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
 			goto err_inval;
 		goto link_it;
+	} else {
+		switch (cfg->fc_type) {
+		case RTN_UNICAST:
+		case RTN_LOCAL:
+		case RTN_BROADCAST:
+		case RTN_ANYCAST:
+		case RTN_MULTICAST:
+			break;
+		default:
+			goto err_inval;
+		}
 	}
 
 	if (cfg->fc_scope > RT_SCOPE_HOST)
@@ -835,6 +853,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 			goto err_inval;
 	}
 
+	change_nexthops(fi) {
+		nexthop_nh->nh_cfg_scope = cfg->fc_scope;
+		nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev,
+							nexthop_nh->nh_gw,
+							nexthop_nh->nh_cfg_scope);
+	} endfor_nexthops(fi)
+
 link_it:
 	ofi = fib_find_info(fi);
 	if (ofi) {
@@ -880,84 +905,6 @@ failure:
 	return ERR_PTR(err);
 }
 
-/* Note! fib_semantic_match intentionally uses RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-		       struct fib_result *res, int prefixlen, int fib_flags)
-{
-	struct fib_alias *fa;
-	int nh_sel = 0;
-
-	list_for_each_entry_rcu(fa, head, fa_list) {
-		int err;
-
-		if (fa->fa_tos &&
-		    fa->fa_tos != flp->fl4_tos)
-			continue;
-
-		if (fa->fa_scope < flp->fl4_scope)
-			continue;
-
-		fib_alias_accessed(fa);
-
-		err = fib_props[fa->fa_type].error;
-		if (err == 0) {
-			struct fib_info *fi = fa->fa_info;
-
-			if (fi->fib_flags & RTNH_F_DEAD)
-				continue;
-
-			switch (fa->fa_type) {
-			case RTN_UNICAST:
-			case RTN_LOCAL:
-			case RTN_BROADCAST:
-			case RTN_ANYCAST:
-			case RTN_MULTICAST:
-				for_nexthops(fi) {
-					if (nh->nh_flags & RTNH_F_DEAD)
-						continue;
-					if (!flp->oif || flp->oif == nh->nh_oif)
-						break;
-				}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-				if (nhsel < fi->fib_nhs) {
-					nh_sel = nhsel;
-					goto out_fill_res;
-				}
-#else
-				if (nhsel < 1)
-					goto out_fill_res;
-#endif
-				endfor_nexthops(fi);
-				continue;
-
-			default:
-				pr_warning("fib_semantic_match bad type %#x\n",
-					   fa->fa_type);
-				return -EINVAL;
-			}
-		}
-		return err;
-	}
-	return 1;
-
-out_fill_res:
-	res->prefixlen = prefixlen;
-	res->nh_sel = nh_sel;
-	res->type = fa->fa_type;
-	res->scope = fa->fa_scope;
-	res->fi = fa->fa_info;
-	if (!(fib_flags & FIB_LOOKUP_NOREF))
-		atomic_inc(&res->fi->fib_clntref);
-	return 0;
-}
-
-/* Find appropriate source address to this destination */
-
-__be32 __fib_res_prefsrc(struct fib_result *res)
-{
-	return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
-}
-
 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 		  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
 		  struct fib_info *fi, unsigned int flags)
@@ -1002,7 +949,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
 		if (fi->fib_nh->nh_oif)
 			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		if (fi->fib_nh[0].nh_tclassid)
 			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
 #endif
@@ -1027,7 +974,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
 			if (nh->nh_gw)
 				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			if (nh->nh_tclassid)
 				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
 #endif
@@ -1125,6 +1072,80 @@ int fib_sync_down_dev(struct net_device *dev, int force)
 	return ret;
 }
 
+/* Must be invoked inside of an RCU protected region. */
+void fib_select_default(struct fib_result *res)
+{
+	struct fib_info *fi = NULL, *last_resort = NULL;
+	struct list_head *fa_head = res->fa_head;
+	struct fib_table *tb = res->table;
+	int order = -1, last_idx = -1;
+	struct fib_alias *fa;
+
+	list_for_each_entry_rcu(fa, fa_head, fa_list) {
+		struct fib_info *next_fi = fa->fa_info;
+
+		if (fa->fa_scope != res->scope ||
+		    fa->fa_type != RTN_UNICAST)
+			continue;
+
+		if (next_fi->fib_priority > res->fi->fib_priority)
+			break;
+		if (!next_fi->fib_nh[0].nh_gw ||
+		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+			continue;
+
+		fib_alias_accessed(fa);
+
+		if (fi == NULL) {
+			if (next_fi != res->fi)
+				break;
+		} else if (!fib_detect_death(fi, order, &last_resort,
+					     &last_idx, tb->tb_default)) {
+			fib_result_assign(res, fi);
+			tb->tb_default = order;
+			goto out;
+		}
+		fi = next_fi;
+		order++;
+	}
+
+	if (order <= 0 || fi == NULL) {
+		tb->tb_default = -1;
+		goto out;
+	}
+
+	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+			      tb->tb_default)) {
+		fib_result_assign(res, fi);
+		tb->tb_default = order;
+		goto out;
+	}
+
+	if (last_idx >= 0)
+		fib_result_assign(res, last_resort);
+	tb->tb_default = last_idx;
+out:
+	return;
+}
+
+void fib_update_nh_saddrs(struct net_device *dev)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct fib_nh *nh;
+	unsigned int hash;
+
+	hash = fib_devindex_hashfn(dev->ifindex);
+	head = &fib_info_devhash[hash];
+	hlist_for_each_entry(nh, node, head, nh_hash) {
+		if (nh->nh_dev != dev)
+			continue;
+		nh->nh_saddr = inet_select_addr(nh->nh_dev,
+						nh->nh_gw,
+						nh->nh_cfg_scope);
+	}
+}
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
@@ -1189,7 +1210,7 @@ int fib_sync_up(struct net_device *dev)
  * The algorithm is suboptimal, but it provides really
  * fair weighted route distribution.
  */
-void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
+void fib_select_multipath(struct fib_result *res)
 {
 	struct fib_info *fi = res->fi;
 	int w;