diff options
author | David S. Miller <davem@davemloft.net> | 2018-03-23 11:24:57 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-03-23 11:31:58 -0400 |
commit | 03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch) | |
tree | fbaf8738296b2e9dcba81c6daef2d515b6c4948c /net/ipv6/route.c | |
parent | 6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff) | |
parent | f36b7534b83357cf52e747905de6d65b4f7c2512 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial,
the RDMA tree's merge commit was used as a guide here, and
here are their notes:
====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/route.c')
-rw-r--r-- | net/ipv6/route.c | 76 |
1 file changed, 45 insertions, 31 deletions
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 939d122e71b4..a2ed9fdd58d4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -128,7 +128,7 @@ struct uncached_list { | |||
128 | 128 | ||
129 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); | 129 | static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); |
130 | 130 | ||
131 | static void rt6_uncached_list_add(struct rt6_info *rt) | 131 | void rt6_uncached_list_add(struct rt6_info *rt) |
132 | { | 132 | { |
133 | struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); | 133 | struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); |
134 | 134 | ||
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt) | |||
139 | spin_unlock_bh(&ul->lock); | 139 | spin_unlock_bh(&ul->lock); |
140 | } | 140 | } |
141 | 141 | ||
142 | static void rt6_uncached_list_del(struct rt6_info *rt) | 142 | void rt6_uncached_list_del(struct rt6_info *rt) |
143 | { | 143 | { |
144 | if (!list_empty(&rt->rt6i_uncached)) { | 144 | if (!list_empty(&rt->rt6i_uncached)) { |
145 | struct uncached_list *ul = rt->rt6i_uncached_list; | 145 | struct uncached_list *ul = rt->rt6i_uncached_list; |
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt) | |||
1514 | } | 1514 | } |
1515 | } | 1515 | } |
1516 | 1516 | ||
1517 | static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) | 1517 | static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, |
1518 | struct rt6_info *rt, int mtu) | ||
1519 | { | ||
1520 | /* If the new MTU is lower than the route PMTU, this new MTU will be the | ||
1521 | * lowest MTU in the path: always allow updating the route PMTU to | ||
1522 | * reflect PMTU decreases. | ||
1523 | * | ||
1524 | * If the new MTU is higher, and the route PMTU is equal to the local | ||
1525 | * MTU, this means the old MTU is the lowest in the path, so allow | ||
1526 | * updating it: if other nodes now have lower MTUs, PMTU discovery will | ||
1527 | * handle this. | ||
1528 | */ | ||
1529 | |||
1530 | if (dst_mtu(&rt->dst) >= mtu) | ||
1531 | return true; | ||
1532 | |||
1533 | if (dst_mtu(&rt->dst) == idev->cnf.mtu6) | ||
1534 | return true; | ||
1535 | |||
1536 | return false; | ||
1537 | } | ||
1538 | |||
1539 | static void rt6_exceptions_update_pmtu(struct inet6_dev *idev, | ||
1540 | struct rt6_info *rt, int mtu) | ||
1518 | { | 1541 | { |
1519 | struct rt6_exception_bucket *bucket; | 1542 | struct rt6_exception_bucket *bucket; |
1520 | struct rt6_exception *rt6_ex; | 1543 | struct rt6_exception *rt6_ex; |
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) | |||
1523 | bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, | 1546 | bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, |
1524 | lockdep_is_held(&rt6_exception_lock)); | 1547 | lockdep_is_held(&rt6_exception_lock)); |
1525 | 1548 | ||
1526 | if (bucket) { | 1549 | if (!bucket) |
1527 | for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { | 1550 | return; |
1528 | hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { | 1551 | |
1529 | struct rt6_info *entry = rt6_ex->rt6i; | 1552 | for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { |
1530 | /* For RTF_CACHE with rt6i_pmtu == 0 | 1553 | hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { |
1531 | * (i.e. a redirected route), | 1554 | struct rt6_info *entry = rt6_ex->rt6i; |
1532 | * the metrics of its rt->dst.from has already | 1555 | |
1533 | * been updated. | 1556 | /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected |
1534 | */ | 1557 | * route), the metrics of its rt->dst.from have already |
1535 | if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) | 1558 | * been updated. |
1536 | entry->rt6i_pmtu = mtu; | 1559 | */ |
1537 | } | 1560 | if (entry->rt6i_pmtu && |
1538 | bucket++; | 1561 | rt6_mtu_change_route_allowed(idev, entry, mtu)) |
1562 | entry->rt6i_pmtu = mtu; | ||
1539 | } | 1563 | } |
1564 | bucket++; | ||
1540 | } | 1565 | } |
1541 | } | 1566 | } |
1542 | 1567 | ||
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
3899 | Since RFC 1981 doesn't include administrative MTU increase | 3924 | Since RFC 1981 doesn't include administrative MTU increase |
3900 | update PMTU increase is a MUST. (i.e. jumbo frame) | 3925 | update PMTU increase is a MUST. (i.e. jumbo frame) |
3901 | */ | 3926 | */ |
3902 | /* | ||
3903 | If new MTU is less than route PMTU, this new MTU will be the | ||
3904 | lowest MTU in the path, update the route PMTU to reflect PMTU | ||
3905 | decreases; if new MTU is greater than route PMTU, and the | ||
3906 | old MTU is the lowest MTU in the path, update the route PMTU | ||
3907 | to reflect the increase. In this case if the other nodes' MTU | ||
3908 | also have the lowest MTU, TOO BIG MESSAGE will be lead to | ||
3909 | PMTU discovery. | ||
3910 | */ | ||
3911 | if (rt->dst.dev == arg->dev && | 3927 | if (rt->dst.dev == arg->dev && |
3912 | dst_metric_raw(&rt->dst, RTAX_MTU) && | ||
3913 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { | 3928 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
3914 | spin_lock_bh(&rt6_exception_lock); | 3929 | spin_lock_bh(&rt6_exception_lock); |
3915 | if (dst_mtu(&rt->dst) >= arg->mtu || | 3930 | if (dst_metric_raw(&rt->dst, RTAX_MTU) && |
3916 | (dst_mtu(&rt->dst) < arg->mtu && | 3931 | rt6_mtu_change_route_allowed(idev, rt, arg->mtu)) |
3917 | dst_mtu(&rt->dst) == idev->cnf.mtu6)) { | ||
3918 | dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); | 3932 | dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); |
3919 | } | 3933 | rt6_exceptions_update_pmtu(idev, rt, arg->mtu); |
3920 | rt6_exceptions_update_pmtu(rt, arg->mtu); | ||
3921 | spin_unlock_bh(&rt6_exception_lock); | 3934 | spin_unlock_bh(&rt6_exception_lock); |
3922 | } | 3935 | } |
3923 | return 0; | 3936 | return 0; |
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
4189 | r_cfg.fc_encap_type = nla_get_u16(nla); | 4202 | r_cfg.fc_encap_type = nla_get_u16(nla); |
4190 | } | 4203 | } |
4191 | 4204 | ||
4205 | r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); | ||
4192 | rt = ip6_route_info_create(&r_cfg, extack); | 4206 | rt = ip6_route_info_create(&r_cfg, extack); |
4193 | if (IS_ERR(rt)) { | 4207 | if (IS_ERR(rt)) { |
4194 | err = PTR_ERR(rt); | 4208 | err = PTR_ERR(rt); |