Diffstat (limited to 'net/ipv6/route.c')
-rw-r--r--	net/ipv6/route.c	76
1 file changed, 45 insertions(+), 31 deletions(-)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 939d122e71b4..a2ed9fdd58d4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
 
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
 {
 	if (!list_empty(&rt->rt6i_uncached)) {
 		struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
 	}
 }
 
-static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
+static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
+					 struct rt6_info *rt, int mtu)
+{
+	/* If the new MTU is lower than the route PMTU, this new MTU will be the
+	 * lowest MTU in the path: always allow updating the route PMTU to
+	 * reflect PMTU decreases.
+	 *
+	 * If the new MTU is higher, and the route PMTU is equal to the local
+	 * MTU, this means the old MTU is the lowest in the path, so allow
+	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
+	 * handle this.
+	 */
+
+	if (dst_mtu(&rt->dst) >= mtu)
+		return true;
+
+	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
+		return true;
+
+	return false;
+}
+
+static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
+				       struct rt6_info *rt, int mtu)
 {
 	struct rt6_exception_bucket *bucket;
 	struct rt6_exception *rt6_ex;
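
The block comment above encodes a two-clause rule. A minimal userspace sketch, assuming plain integers (route_pmtu and local_mtu stand in for dst_mtu(&rt->dst) and idev->cnf.mtu6; mtu_change_allowed() is a hypothetical name, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool mtu_change_allowed(int route_pmtu, int local_mtu, int new_mtu)
{
	/* A decrease always applies: the new MTU is now the path minimum. */
	if (route_pmtu >= new_mtu)
		return true;

	/* An increase applies only if the old local MTU was the path
	 * minimum; any remote bottleneck is left to PMTU discovery. */
	if (route_pmtu == local_mtu)
		return true;

	return false;
}

int main(void)
{
	printf("%d\n", mtu_change_allowed(1500, 1500, 9000)); /* 1: local link was the bottleneck */
	printf("%d\n", mtu_change_allowed(1280, 1500, 9000)); /* 0: remote bottleneck, keep PMTU */
	printf("%d\n", mtu_change_allowed(9000, 9000, 1500)); /* 1: decreases always propagate */
	return 0;
}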
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
 	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
 					lockdep_is_held(&rt6_exception_lock));
 
-	if (bucket) {
-		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
-			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
-				struct rt6_info *entry = rt6_ex->rt6i;
-				/* For RTF_CACHE with rt6i_pmtu == 0
-				 * (i.e. a redirected route),
-				 * the metrics of its rt->dst.from has already
-				 * been updated.
-				 */
-				if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
-					entry->rt6i_pmtu = mtu;
-			}
-			bucket++;
+	if (!bucket)
+		return;
+
+	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
+		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
+			struct rt6_info *entry = rt6_ex->rt6i;
+
+			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
+			 * route), the metrics of its rt->dst.from have already
+			 * been updated.
+			 */
+			if (entry->rt6i_pmtu &&
+			    rt6_mtu_change_route_allowed(idev, entry, mtu))
+				entry->rt6i_pmtu = mtu;
 		}
+		bucket++;
 	}
 }
 
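A toy model of the loop above, assuming a flat array in place of the exception buckets and hlist walk, and pmtu in place of dst_mtu(): under the old test (entry->rt6i_pmtu > mtu) exception PMTUs could only shrink, while the new test also lets them grow back when the local MTU was the path bottleneck.

#include <stdio.h>

struct exc { int pmtu; };	/* 0 models a redirected route: skipped */

/* Old rule: exception PMTUs only ever decrease. */
static void update_old(struct exc *e, int n, int mtu)
{
	for (int i = 0; i < n; i++)
		if (e[i].pmtu && e[i].pmtu > mtu)
			e[i].pmtu = mtu;
}

/* New rule: decrease, or increase when the entry tracked the local MTU. */
static void update_new(struct exc *e, int n, int mtu, int local_mtu)
{
	for (int i = 0; i < n; i++)
		if (e[i].pmtu &&
		    (e[i].pmtu >= mtu || e[i].pmtu == local_mtu))
			e[i].pmtu = mtu;
}

int main(void)
{
	struct exc a[] = { { 1500 }, { 1280 }, { 0 } };
	struct exc b[] = { { 1500 }, { 1280 }, { 0 } };

	update_old(a, 3, 9000);		/* device MTU raised to 9000 */
	update_new(b, 3, 9000, 1500);	/* old local MTU was 1500 */

	printf("old rule: %d %d %d\n", a[0].pmtu, a[1].pmtu, a[2].pmtu); /* 1500 1280 0 */
	printf("new rule: %d %d %d\n", b[0].pmtu, b[1].pmtu, b[2].pmtu); /* 9000 1280 0 */
	return 0;
}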
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   Since RFC 1981 doesn't include administrative MTU increase
 	   update PMTU increase is a MUST. (i.e. jumbo frame)
 	 */
-	/*
-	   If new MTU is less than route PMTU, this new MTU will be the
-	   lowest MTU in the path, update the route PMTU to reflect PMTU
-	   decreases; if new MTU is greater than route PMTU, and the
-	   old MTU is the lowest MTU in the path, update the route PMTU
-	   to reflect the increase. In this case if the other nodes' MTU
-	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
-	   PMTU discovery.
-	 */
 	if (rt->dst.dev == arg->dev &&
-	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
 		spin_lock_bh(&rt6_exception_lock);
-		if (dst_mtu(&rt->dst) >= arg->mtu ||
-		    (dst_mtu(&rt->dst) < arg->mtu &&
-		     dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+		if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
+		    rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
 			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
-		}
-		rt6_exceptions_update_pmtu(rt, arg->mtu);
+		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
 		spin_unlock_bh(&rt6_exception_lock);
 	}
 	return 0;
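
For the dst_metric_set() branch, the helper call is logically equivalent to the open-coded condition it replaces: once dst_mtu >= arg->mtu fails, dst_mtu < arg->mtu is implied, so the conjunct reduces to the mtu6 comparison alone. A small exhaustive check over toy values (a sketch, not kernel code):

#include <assert.h>
#include <stdbool.h>

/* Old open-coded test from rt6_mtu_change_route(). */
static bool old_cond(int dst_mtu, int new_mtu, int local_mtu)
{
	return dst_mtu >= new_mtu ||
	       (dst_mtu < new_mtu && dst_mtu == local_mtu);
}

/* New test, mirroring rt6_mtu_change_route_allowed(). */
static bool new_cond(int dst_mtu, int new_mtu, int local_mtu)
{
	return dst_mtu >= new_mtu || dst_mtu == local_mtu;
}

int main(void)
{
	/* Exhaustive over a small toy range; passes silently. */
	for (int d = 0; d < 64; d++)
		for (int n = 0; n < 64; n++)
			for (int l = 0; l < 64; l++)
				assert(old_cond(d, n, l) == new_cond(d, n, l));
	return 0;
}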
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 			r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 
+		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
 		rt = ip6_route_info_create(&r_cfg, extack);
 		if (IS_ERR(rt)) {
 			err = PTR_ERR(rt);
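
The added line propagates the per-nexthop RTNH_F_ONLINK flag from the RTA_MULTIPATH request into each route's fc_flags. A minimal userspace sketch of how such a nexthop entry is encoded (put_onlink_nexthop() is a hypothetical helper; the surrounding RTM_NEWROUTE message and buffer bounds-checking are elided):

#include <string.h>
#include <netinet/in.h>
#include <linux/rtnetlink.h>

/* Append one nexthop carrying RTNH_F_ONLINK to a multipath buffer.
 * Returns the number of bytes written. */
static size_t put_onlink_nexthop(char *buf, int ifindex,
				 const struct in6_addr *gw)
{
	struct rtnexthop *rtnh = (struct rtnexthop *)buf;
	struct rtattr *rta = (struct rtattr *)(buf + sizeof(*rtnh));

	rtnh->rtnh_flags = RTNH_F_ONLINK;	/* the flag the hunk above copies */
	rtnh->rtnh_hops = 0;
	rtnh->rtnh_ifindex = ifindex;

	/* Nested RTA_GATEWAY attribute holding the IPv6 gateway address. */
	rta->rta_type = RTA_GATEWAY;
	rta->rta_len = RTA_LENGTH(sizeof(*gw));
	memcpy(RTA_DATA(rta), gw, sizeof(*gw));

	rtnh->rtnh_len = sizeof(*rtnh) + RTA_ALIGN(rta->rta_len);
	return rtnh->rtnh_len;
}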