Diffstat (limited to 'net')

 net/ipv4/fib_frontend.c |  12
 net/ipv4/fib_trie.c     |  44
 net/ipv4/route.c        | 129
 net/ipv6/ip6_fib.c      |  27
 net/ipv6/route.c        | 123

 5 files changed, 271 insertions, 64 deletions
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 108191667531..317339cd7f03 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -912,10 +912,15 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 		NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request");
 		return -EINVAL;
 	}
+
 	if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) {
 		NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request");
 		return -EINVAL;
 	}
+	if (rtm->rtm_flags & RTM_F_CLONED)
+		filter->dump_routes = false;
+	else
+		filter->dump_exceptions = false;

 	filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC);
 	filter->flags = rtm->rtm_flags;
@@ -962,9 +967,10 @@ EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);

 static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct fib_dump_filter filter = { .dump_routes = true,
+					  .dump_exceptions = true };
 	const struct nlmsghdr *nlh = cb->nlh;
 	struct net *net = sock_net(skb->sk);
-	struct fib_dump_filter filter = {};
 	unsigned int h, s_h;
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
@@ -981,8 +987,8 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 		filter.flags = rtm->rtm_flags & (RTM_F_PREFIX | RTM_F_CLONED);
 	}

-	/* fib entries are never clones and ipv4 does not use prefix flag */
-	if (filter.flags & (RTM_F_PREFIX | RTM_F_CLONED))
+	/* ipv4 does not use prefix flag */
+	if (filter.flags & RTM_F_PREFIX)
 		return skb->len;

 	if (filter.table_id) {
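Not part of the patch, but for context: with strict validation (ip_valid_fib_dump_req() above), an RTM_GETROUTE dump that sets RTM_F_CLONED now returns route exceptions only, while a request without the flag keeps returning FIB routes only. A minimal userspace sketch of such a request follows; the helper name request_exception_dump() is made up for illustration, and reply parsing is left to the caller.

/* Sketch only: build and send an RTM_GETROUTE dump request asking for
 * IPv4 route exceptions (RTM_F_CLONED). Most error handling omitted.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int request_exception_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_INET;
	req.rtm.rtm_flags = RTM_F_CLONED;	/* exceptions, not FIB routes */

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}

	/* caller recv()s the RTM_NEWROUTE replies and then close()s fd */
	return fd;
}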
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 90f0fc8c87bd..4400f5051977 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2090,22 +2090,26 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 {
 	unsigned int flags = NLM_F_MULTI;
 	__be32 xkey = htonl(l->key);
+	int i, s_i, i_fa, s_fa, err;
 	struct fib_alias *fa;
-	int i, s_i;

-	if (filter->filter_set)
+	if (filter->filter_set ||
+	    !filter->dump_exceptions || !filter->dump_routes)
 		flags |= NLM_F_DUMP_FILTERED;

 	s_i = cb->args[4];
+	s_fa = cb->args[5];
 	i = 0;

 	/* rcu_read_lock is hold by caller */
 	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
-		int err;
+		struct fib_info *fi = fa->fa_info;

 		if (i < s_i)
 			goto next;

+		i_fa = 0;
+
 		if (tb->tb_id != fa->tb_id)
 			goto next;

@@ -2114,29 +2118,43 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 				goto next;

 			if ((filter->protocol &&
-			     fa->fa_info->fib_protocol != filter->protocol))
+			     fi->fib_protocol != filter->protocol))
 				goto next;

 			if (filter->dev &&
-			    !fib_info_nh_uses_dev(fa->fa_info, filter->dev))
+			    !fib_info_nh_uses_dev(fi, filter->dev))
 				goto next;
 		}

-		err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
-				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
-				    tb->tb_id, fa->fa_type,
-				    xkey, KEYLENGTH - fa->fa_slen,
-				    fa->fa_tos, fa->fa_info, flags);
-		if (err < 0) {
-			cb->args[4] = i;
-			return err;
+		if (filter->dump_routes && !s_fa) {
+			err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+					    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+					    tb->tb_id, fa->fa_type,
+					    xkey, KEYLENGTH - fa->fa_slen,
+					    fa->fa_tos, fi, flags);
+			if (err < 0)
+				goto stop;
+			i_fa++;
 		}
+
+		if (filter->dump_exceptions) {
+			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
+						 &i_fa, s_fa);
+			if (err < 0)
+				goto stop;
+		}
+
 next:
 		i++;
 	}

 	cb->args[4] = i;
 	return skb->len;
+
+stop:
+	cb->args[4] = i;
+	cb->args[5] = i_fa;
+	return err;
 }

 /* rcu_read_lock needs to be hold by caller from readside */
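Not part of the patch, but for context: fn_trie_dump_leaf() now keeps two resume cursors, cb->args[4] for the fib_alias index inside the leaf and cb->args[5] for the position inside that alias (its route plus its exceptions), so a dump that fills the skb mid-alias resumes exactly where it stopped. Below is a standalone sketch of the same pattern with made-up names; emit() stands in for fib_dump_info()/fib_dump_info_fnhe() and returns nonzero when the output buffer is full.

/* Illustration only: resume a two-level iteration from saved cursors,
 * the way cb->args[4]/cb->args[5] are used above.
 */
struct dump_cursor {
	int entry;	/* outer index, like cb->args[4] */
	int item;	/* inner index, like cb->args[5] */
};

static int dump_resumable(int nentries, int nitems,
			  int (*emit)(int entry, int item),
			  struct dump_cursor *cur)
{
	int i, j;

	for (i = cur->entry; i < nentries; i++) {
		/* the inner cursor only applies to the entry we stopped in */
		j = (i == cur->entry) ? cur->item : 0;

		for (; j < nitems; j++) {
			if (emit(i, j)) {
				/* buffer full: save both cursors so the next
				 * call resumes at exactly this item
				 */
				cur->entry = i;
				cur->item = j;
				return -1;
			}
		}
	}

	return 0;	/* everything dumped */
}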
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 66cbe8a7a168..6aee412a68bd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2699,7 +2699,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	r->rtm_family = AF_INET;
 	r->rtm_dst_len = 32;
 	r->rtm_src_len = 0;
-	r->rtm_tos = fl4->flowi4_tos;
+	r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
 	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table_id))
 		goto nla_put_failure;
@@ -2727,7 +2727,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
 		goto nla_put_failure;
 #endif
-	if (!rt_is_input_route(rt) &&
+	if (fl4 && !rt_is_input_route(rt) &&
 	    fl4->saddr != src) {
 		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
 			goto nla_put_failure;
@@ -2767,36 +2767,40 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	if (rtnetlink_put_metrics(skb, metrics) < 0)
 		goto nla_put_failure;

-	if (fl4->flowi4_mark &&
-	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
-		goto nla_put_failure;
-
-	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
-	    nla_put_u32(skb, RTA_UID,
-			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
-		goto nla_put_failure;
+	if (fl4) {
+		if (fl4->flowi4_mark &&
+		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
+			goto nla_put_failure;

-	error = rt->dst.error;
+		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+		    nla_put_u32(skb, RTA_UID,
+				from_kuid_munged(current_user_ns(),
+						 fl4->flowi4_uid)))
+			goto nla_put_failure;

-	if (rt_is_input_route(rt)) {
+		if (rt_is_input_route(rt)) {
 #ifdef CONFIG_IP_MROUTE
-		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
-		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
-			int err = ipmr_get_route(net, skb,
-						 fl4->saddr, fl4->daddr,
-						 r, portid);
-
-			if (err <= 0) {
-				if (err == 0)
-					return 0;
-				goto nla_put_failure;
-			}
-		} else
+			if (ipv4_is_multicast(dst) &&
+			    !ipv4_is_local_multicast(dst) &&
+			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+				int err = ipmr_get_route(net, skb,
+							 fl4->saddr, fl4->daddr,
+							 r, portid);
+
+				if (err <= 0) {
+					if (err == 0)
+						return 0;
+					goto nla_put_failure;
+				}
+			} else
 #endif
-		if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
-			goto nla_put_failure;
+			if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
+				goto nla_put_failure;
+		}
 	}

+	error = rt->dst.error;
+
 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
 		goto nla_put_failure;
@@ -2808,6 +2812,79 @@ nla_put_failure:
 	return -EMSGSIZE;
 }

+static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
+			    struct netlink_callback *cb, u32 table_id,
+			    struct fnhe_hash_bucket *bucket, int genid,
+			    int *fa_index, int fa_start)
+{
+	int i;
+
+	for (i = 0; i < FNHE_HASH_SIZE; i++) {
+		struct fib_nh_exception *fnhe;
+
+		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
+		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+			struct rtable *rt;
+			int err;
+
+			if (*fa_index < fa_start)
+				goto next;
+
+			if (fnhe->fnhe_genid != genid)
+				goto next;
+
+			if (fnhe->fnhe_expires &&
+			    time_after(jiffies, fnhe->fnhe_expires))
+				goto next;
+
+			rt = rcu_dereference(fnhe->fnhe_rth_input);
+			if (!rt)
+				rt = rcu_dereference(fnhe->fnhe_rth_output);
+			if (!rt)
+				goto next;
+
+			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
+					   table_id, NULL, skb,
+					   NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq);
+			if (err)
+				return err;
+next:
+			(*fa_index)++;
+		}
+	}
+
+	return 0;
+}
+
+int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
+		       u32 table_id, struct fib_info *fi,
+		       int *fa_index, int fa_start)
+{
+	struct net *net = sock_net(cb->skb->sk);
+	int nhsel, genid = fnhe_genid(net);
+
+	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
+		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+		struct fnhe_hash_bucket *bucket;
+		int err;
+
+		if (nhc->nhc_flags & RTNH_F_DEAD)
+			continue;
+
+		bucket = rcu_dereference(nhc->nhc_exceptions);
+		if (!bucket)
+			continue;
+
+		err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, genid,
+				       fa_index, fa_start);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
 						   u8 ip_proto, __be16 sport,
 						   __be16 dport)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5b1c9b5b9247..49884f96232b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -464,12 +464,19 @@ static int fib6_dump_node(struct fib6_walker *w)
 	struct fib6_info *rt;

 	for_each_fib6_walker_rt(w) {
-		res = rt6_dump_route(rt, w->args);
-		if (res < 0) {
+		res = rt6_dump_route(rt, w->args, w->skip_in_node);
+		if (res >= 0) {
 			/* Frame is full, suspend walking */
 			w->leaf = rt;
+
+			/* We'll restart from this node, so if some routes were
+			 * already dumped, skip them next time.
+			 */
+			w->skip_in_node += res;
+
 			return 1;
 		}
+		w->skip_in_node = 0;

 		/* Multipath routes are dumped in one route with the
 		 * RTA_MULTIPATH attribute. Jump 'rt' to point to the
@@ -521,6 +528,7 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 	if (cb->args[4] == 0) {
 		w->count = 0;
 		w->skip = 0;
+		w->skip_in_node = 0;

 		spin_lock_bh(&table->tb6_lock);
 		res = fib6_walk(net, w);
@@ -536,6 +544,7 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 			w->state = FWS_INIT;
 			w->node = w->root;
 			w->skip = w->count;
+			w->skip_in_node = 0;
 		} else
 			w->skip = 0;

@@ -553,9 +562,10 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,

 static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true,
+					 .filter.dump_routes = true };
 	const struct nlmsghdr *nlh = cb->nlh;
 	struct net *net = sock_net(skb->sk);
-	struct rt6_rtnl_dump_arg arg = {};
 	unsigned int h, s_h;
 	unsigned int e = 0, s_e;
 	struct fib6_walker *w;
@@ -572,13 +582,10 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
 		struct rtmsg *rtm = nlmsg_data(nlh);

-		arg.filter.flags = rtm->rtm_flags & (RTM_F_PREFIX|RTM_F_CLONED);
+		if (rtm->rtm_flags & RTM_F_PREFIX)
+			arg.filter.flags = RTM_F_PREFIX;
 	}

-	/* fib entries are never clones */
-	if (arg.filter.flags & RTM_F_CLONED)
-		goto out;
-
 	w = (void *)cb->args[2];
 	if (!w) {
 		/* New dump:
@@ -1589,7 +1596,8 @@ static struct fib6_node *fib6_locate_1(struct fib6_node *root,
 		if (plen == fn->fn_bit)
 			return fn;

-		prev = fn;
+		if (fn->fn_flags & RTN_RTINFO)
+			prev = fn;

 next:
 		/*
@@ -2096,6 +2104,7 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
 	c.w.func = fib6_clean_node;
 	c.w.count = 0;
 	c.w.skip = 0;
+	c.w.skip_in_node = 0;
 	c.func = func;
 	c.sernum = sernum;
 	c.arg = arg;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3975ae8e2440..be5e65c97652 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3840,7 +3840,8 @@ static int ip6_route_del(struct fib6_config *cfg,
 		for_each_fib6_node_rt_rcu(fn) {
 			struct fib6_nh *nh;

-			if (rt->nh && rt->nh->id != cfg->fc_nh_id)
+			if (rt->nh && cfg->fc_nh_id &&
+			    rt->nh->id != cfg->fc_nh_id)
 				continue;

 			if (cfg->fc_flags & RTF_CACHE) {
@@ -5521,33 +5522,129 @@ static bool fib6_info_uses_dev(const struct fib6_info *f6i,
 	return false;
 }

-int rt6_dump_route(struct fib6_info *rt, void *p_arg)
+struct fib6_nh_exception_dump_walker {
+	struct rt6_rtnl_dump_arg *dump;
+	struct fib6_info *rt;
+	unsigned int flags;
+	unsigned int skip;
+	unsigned int count;
+};
+
+static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
+{
+	struct fib6_nh_exception_dump_walker *w = arg;
+	struct rt6_rtnl_dump_arg *dump = w->dump;
+	struct rt6_exception_bucket *bucket;
+	struct rt6_exception *rt6_ex;
+	int i, err;
+
+	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
+	if (!bucket)
+		return 0;
+
+	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
+		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
+			if (w->skip) {
+				w->skip--;
+				continue;
+			}
+
+			/* Expiration of entries doesn't bump sernum, insertion
+			 * does. Removal is triggered by insertion, so we can
+			 * rely on the fact that if entries change between two
+			 * partial dumps, this node is scanned again completely,
+			 * see rt6_insert_exception() and fib6_dump_table().
+			 *
+			 * Count expired entries we go through as handled
+			 * entries that we'll skip next time, in case of partial
+			 * node dump. Otherwise, if entries expire meanwhile,
+			 * we'll skip the wrong amount.
+			 */
+			if (rt6_check_expired(rt6_ex->rt6i)) {
+				w->count++;
+				continue;
+			}
+
+			err = rt6_fill_node(dump->net, dump->skb, w->rt,
+					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
+					    RTM_NEWROUTE,
+					    NETLINK_CB(dump->cb->skb).portid,
+					    dump->cb->nlh->nlmsg_seq, w->flags);
+			if (err)
+				return err;
+
+			w->count++;
+		}
+		bucket++;
+	}
+
+	return 0;
+}
+
+/* Return -1 if done with node, number of handled routes on partial dump */
+int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
 {
 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
 	struct fib_dump_filter *filter = &arg->filter;
 	unsigned int flags = NLM_F_MULTI;
 	struct net *net = arg->net;
+	int count = 0;

 	if (rt == net->ipv6.fib6_null_entry)
-		return 0;
+		return -1;

 	if ((filter->flags & RTM_F_PREFIX) &&
 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
 		/* success since this is not a prefix route */
-		return 1;
+		return -1;
 	}
-	if (filter->filter_set) {
-		if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
-		    (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
-		    (filter->protocol && rt->fib6_protocol != filter->protocol)) {
-			return 1;
-		}
+	if (filter->filter_set &&
+	    ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
+	     (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
+	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
+		return -1;
+	}
+
+	if (filter->filter_set ||
+	    !filter->dump_routes || !filter->dump_exceptions) {
 		flags |= NLM_F_DUMP_FILTERED;
 	}

-	return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
-			     RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
-			     arg->cb->nlh->nlmsg_seq, flags);
+	if (filter->dump_routes) {
+		if (skip) {
+			skip--;
+		} else {
+			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
+					  0, RTM_NEWROUTE,
+					  NETLINK_CB(arg->cb->skb).portid,
+					  arg->cb->nlh->nlmsg_seq, flags)) {
+				return 0;
+			}
+			count++;
+		}
+	}
+
+	if (filter->dump_exceptions) {
+		struct fib6_nh_exception_dump_walker w = { .dump = arg,
+							   .rt = rt,
+							   .flags = flags,
+							   .skip = skip,
+							   .count = 0 };
+		int err;
+
+		if (rt->nh) {
+			err = nexthop_for_each_fib6_nh(rt->nh,
+						       rt6_nh_dump_exceptions,
+						       &w);
+		} else {
+			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
+		}
+
+		if (err)
+			return count += w.count;
+	}
+
+	return -1;
 }

 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
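Not part of the patch, but to close the loop on the request sketch after the fib_frontend.c hunks: the dumped routes and exceptions arrive as a stream of RTM_NEWROUTE messages terminated by NLMSG_DONE. A minimal receiver, again with a made-up helper name, could look like this:

/* Sketch only: read the replies to the dump request above and count the
 * returned entries. Attribute parsing is left out.
 */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int count_dumped_routes(int fd)
{
	char buf[32768];
	int count = 0;

	for (;;) {
		struct nlmsghdr *nlh;
		ssize_t len = recv(fd, buf, sizeof(buf), 0);

		if (len <= 0)
			return -1;

		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				return count;
			if (nlh->nlmsg_type == NLMSG_ERROR)
				return -1;
			if (nlh->nlmsg_type == RTM_NEWROUTE)
				count++;
		}
	}
}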
