author		David S. Miller <davem@davemloft.net>	2018-03-23 11:24:57 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-23 11:31:58 -0400
commit		03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree		fbaf8738296b2e9dcba81c6daef2d515b6c4948c /net/ipv6
parent		6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent		f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
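The resolution keeps the moved function from 'net' and drops the
phy_error() call as 'net-next' did. As a rough sketch of the merged
result (illustrative only; the exact body is outside this net/ipv6
diffstat):

/* Sketch of the merged phy_disable_interrupts(), assuming the 'net'
 * placement and the 'net-next' removal of phy_error(); not the
 * literal resolved hunk.
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		return err;

	/* Clear the interrupt */
	return phy_clear_interrupt(phydev);
}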
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
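In practice that resolution is one extra assignment in
xfrm4_fill_dst(), next to the other rtable members already copied into
the bundle; a sketch (field list abridged, rt_mtu_locked per the 'net'
fix):

/* Sketch of the xfrm4_fill_dst() resolution: keep copying the rtable
 * members 'net-next' still has, and add the new rt_mtu_locked from
 * 'net'. Abridged, not the full function.
 */
xdst->u.rt.rt_gateway = rt->rt_gateway;
xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;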
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the
RDMA tree's merge commit was used as a guide here, and here are
their notes:
====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
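For context, the init/de-init list the notes refer to is mlx5_ib's
staged profile: an ordered table of init/cleanup pairs run forward on
load and backward on unload. A simplified sketch of the pattern
(condensed from drivers/infiniband/hw/mlx5/main.c, not the literal
resolved code):

struct mlx5_ib_stage {
	int	(*init)(struct mlx5_ib_dev *dev);
	void	(*cleanup)(struct mlx5_ib_dev *dev);
};

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

/* Cleanup walks the stage list in reverse from the failed (or final)
 * stage; this ordering is why the cleanup patch and the representors
 * patch had to agree on one list, and why the init/de-init helpers
 * became non-static.
 */
static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
			     const struct mlx5_ib_profile *profile,
			     int stage)
{
	while (stage > 0) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}
}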
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--   net/ipv6/datagram.c          | 21
-rw-r--r--   net/ipv6/ip6_gre.c           |  8
-rw-r--r--   net/ipv6/ndisc.c             |  3
-rw-r--r--   net/ipv6/route.c             | 76
-rw-r--r--   net/ipv6/seg6_iptunnel.c     |  7
-rw-r--r--   net/ipv6/xfrm6_mode_tunnel.c |  3
-rw-r--r--   net/ipv6/xfrm6_policy.c      |  5
7 files changed, 79 insertions(+), 44 deletions(-)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b27333d7b099..88bc2ef7c7a8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr;
+	struct in6_addr *daddr, old_daddr;
+	__be32 fl6_flowlabel = 0;
+	__be32 old_fl6_flowlabel;
+	__be16 old_dport;
 	int addr_type;
 	int err;
-	__be32 fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
 		}
 	}
 
+	/* save the current peer information before updating it */
+	old_daddr = sk->sk_v6_daddr;
+	old_fl6_flowlabel = np->flow_label;
+	old_dport = inet->inet_dport;
+
 	sk->sk_v6_daddr = *daddr;
 	np->flow_label = fl6_flowlabel;
-
 	inet->inet_dport = usin->sin6_port;
 
 	/*
@@ -250,11 +256,12 @@ ipv4_connected:
 
 	err = ip6_datagram_dst_update(sk, true);
 	if (err) {
-		/* Reset daddr and dport so that udp_v6_early_demux()
-		 * fails to find this socket
+		/* Restore the socket peer info, to keep it consistent with
+		 * the old socket state
 		 */
-		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
-		inet->inet_dport = 0;
+		sk->sk_v6_daddr = old_daddr;
+		np->flow_label = old_fl6_flowlabel;
+		inet->inet_dport = old_dport;
 		goto out;
 	}
 
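The datagram.c fix is a straightforward save/attempt/restore pattern:
snapshot the peer fields, write the new ones, and roll back to the
snapshot if the dst update fails, so lookups such as
udp_v6_early_demux() never see a half-connected socket. Schematically
(illustrative types and names, not kernel code):

struct peer_info {
	unsigned char  daddr[16];   /* stands in for sk_v6_daddr */
	unsigned int   flow_label;  /* stands in for np->flow_label */
	unsigned short dport;       /* stands in for inet->inet_dport */
};

struct sock_state {
	struct peer_info peer;
};

/* Stand-in for ip6_datagram_dst_update(): may fail after the peer
 * fields were already overwritten.
 */
static int rebuild_route(struct sock_state *sk)
{
	(void)sk;
	return -1;	/* pretend the route lookup failed */
}

static int connect_update(struct sock_state *sk, const struct peer_info *peer)
{
	struct peer_info old = sk->peer;	/* 1. snapshot */
	int err;

	sk->peer = *peer;			/* 2. update */
	err = rebuild_route(sk);
	if (err)
		sk->peer = old;			/* 3. roll back on failure */
	return err;
}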
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6adbcf40cf8c..3a98c694da5f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
-			gre_proto == htons(ETH_P_ERSPAN)) ?
+			gre_proto == htons(ETH_P_ERSPAN) ||
+			gre_proto == htons(ETH_P_ERSPAN2)) ?
 		       ARPHRD_ETHER : ARPHRD_IP6GRE;
 	int score, cand_score = 4;
 
@@ -905,6 +906,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		truncate = true;
 	}
 
+	if (skb_cow_head(skb, dev->needed_headroom))
+		goto tx_err;
+
 	t->parms.o_flags &= ~TUNNEL_KEY;
 	IPCB(skb)->flags = 0;
 
@@ -947,6 +951,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 					    md->u.md2.dir,
 					    get_hwid(&md->u.md2),
 					    truncate, false);
+		} else {
+			goto tx_err;
 		}
 	} else {
 		switch (skb->protocol) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 10024eb0c521..d1d0b2fa7a07 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
 	*(opt++) = (rd_len >> 3);
 	opt += 6;
 
-	memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+	skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+		      rd_len - 8);
 }
 
 void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
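The ndisc change replaces a direct memcpy() from ipv6_hdr() with
skb_copy_bits(), which also handles packets whose header bytes sit in
paged fragments rather than the linear head. For orientation, the
kernel helper's signature:

/* include/linux/skbuff.h: copies len bytes starting at offset, walking
 * the linear area, paged fragments and the frag list; returns a
 * negative error if the skb holds fewer than offset + len bytes.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);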
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 939d122e71b4..a2ed9fdd58d4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
 
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
 {
 	if (!list_empty(&rt->rt6i_uncached)) {
 		struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
 	}
 }
 
-static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
+static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
+					 struct rt6_info *rt, int mtu)
+{
+	/* If the new MTU is lower than the route PMTU, this new MTU will be the
+	 * lowest MTU in the path: always allow updating the route PMTU to
+	 * reflect PMTU decreases.
+	 *
+	 * If the new MTU is higher, and the route PMTU is equal to the local
+	 * MTU, this means the old MTU is the lowest in the path, so allow
+	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
+	 * handle this.
+	 */
+
+	if (dst_mtu(&rt->dst) >= mtu)
+		return true;
+
+	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
+		return true;
+
+	return false;
+}
+
+static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
+				       struct rt6_info *rt, int mtu)
 {
 	struct rt6_exception_bucket *bucket;
 	struct rt6_exception *rt6_ex;
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
 	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
 					lockdep_is_held(&rt6_exception_lock));
 
-	if (bucket) {
-		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
-			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
-				struct rt6_info *entry = rt6_ex->rt6i;
-				/* For RTF_CACHE with rt6i_pmtu == 0
-				 * (i.e. a redirected route),
-				 * the metrics of its rt->dst.from has already
-				 * been updated.
-				 */
-				if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
-					entry->rt6i_pmtu = mtu;
-			}
-			bucket++;
+	if (!bucket)
+		return;
+
+	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
+		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
+			struct rt6_info *entry = rt6_ex->rt6i;
+
+			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
+			 * route), the metrics of its rt->dst.from have already
+			 * been updated.
+			 */
+			if (entry->rt6i_pmtu &&
+			    rt6_mtu_change_route_allowed(idev, entry, mtu))
+				entry->rt6i_pmtu = mtu;
 		}
+		bucket++;
 	}
 }
 
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	   Since RFC 1981 doesn't include administrative MTU increase
 	   update PMTU increase is a MUST. (i.e. jumbo frame)
 	 */
-	/*
-	   If new MTU is less than route PMTU, this new MTU will be the
-	   lowest MTU in the path, update the route PMTU to reflect PMTU
-	   decreases; if new MTU is greater than route PMTU, and the
-	   old MTU is the lowest MTU in the path, update the route PMTU
-	   to reflect the increase. In this case if the other nodes' MTU
-	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
-	   PMTU discovery.
-	 */
 	if (rt->dst.dev == arg->dev &&
-	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
 	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
 		spin_lock_bh(&rt6_exception_lock);
-		if (dst_mtu(&rt->dst) >= arg->mtu ||
-		    (dst_mtu(&rt->dst) < arg->mtu &&
-		     dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+		if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
+		    rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
 			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
-		}
-		rt6_exceptions_update_pmtu(rt, arg->mtu);
+		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
 		spin_unlock_bh(&rt6_exception_lock);
 	}
 	return 0;
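Read as a truth table, the new helper makes the MTU-change policy
explicit:

/* rt6_mtu_change_route_allowed(idev, rt, mtu), spelled out:
 *
 *   dst_mtu(&rt->dst) >= mtu            -> allow: the change is a PMTU
 *                                          decrease (or a no-op)
 *   dst_mtu(&rt->dst) == idev->cnf.mtu6 -> allow: the local link MTU was
 *                                          the path bottleneck, so an
 *                                          increase may propagate; PMTU
 *                                          discovery re-lowers it if a
 *                                          remote hop is smaller
 *   otherwise                           -> deny: some remote hop, not
 *                                          this device, bounds the path
 */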
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 
+		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
 		rt = ip6_route_info_create(&r_cfg, extack);
 		if (IS_ERR(rt)) {
 			err = PTR_ERR(rt);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bd6cc688bd19..7a78dcfda68a 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
 /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
 int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct dst_entry *dst = skb_dst(skb);
+	struct net *net = dev_net(dst->dev);
 	struct ipv6hdr *hdr, *inner_hdr;
 	struct ipv6_sr_hdr *isrh;
 	int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	isrh->nexthdr = proto;
 
 	hdr->daddr = isrh->segments[isrh->first_segment];
-	set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
+	set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
 	if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
 
 	slwt = seg6_lwt_lwtunnel(newts);
 
-	err = dst_cache_init(&slwt->cache, GFP_KERNEL);
+	err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
 	if (err) {
 		kfree(newts);
 		return err;
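The GFP_KERNEL to GFP_ATOMIC switch in seg6_build_state() is an
allocation-context fix: a build_state handler can run in a context
where sleeping is not allowed, and GFP_KERNEL may sleep (the exact
call chain is not spelled out in this merge, so treat the rationale as
inferred). The general rule, as a sketch:

/* Sketch of the allocation-context rule behind the change above:
 * GFP_KERNEL may sleep and is only safe in process context, while
 * GFP_ATOMIC never sleeps and is required under rcu_read_lock(),
 * spinlocks or softirq.
 */
#include <linux/slab.h>
#include <linux/rcupdate.h>

static void *alloc_under_rcu(size_t size)
{
	void *p;

	rcu_read_lock();
	p = kmalloc(size, GFP_ATOMIC);	/* ok: never sleeps */
	/* kmalloc(size, GFP_KERNEL) here could sleep: a bug */
	rcu_read_unlock();

	return p;
}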
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7fea..de1b0b8c53b0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 88cd0c90fa81..cbb270bd81b0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
+	INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
+	rt6_uncached_list_add(&xdst->u.rt6);
+	atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
 
 	return 0;
 }
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
 	dst_destroy_metrics_generic(dst);
+	if (xdst->u.rt6.rt6i_uncached_list)
+		rt6_uncached_list_del(&xdst->u.rt6);
 	xfrm_dst_destroy(xdst);
 }
 
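These two hunks are why rt6_uncached_list_add() and
rt6_uncached_list_del() lose their static qualifier in route.c above:
xfrm bundle routes never enter the FIB tree, so they have to register
on the side list that IPv6 walks for global events such as device
unregistration. The underlying pattern, as a generic sketch (names
illustrative, not kernel code):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Generic "uncached registry" pattern: objects living outside the main
 * lookup structure are threaded onto a side list at creation and taken
 * off at destruction, so flush/unregister events can still reach them.
 */
struct uncached_registry {
	struct list_head	head;
	spinlock_t		lock;
};

static void registry_add(struct uncached_registry *reg, struct list_head *node)
{
	spin_lock_bh(&reg->lock);
	list_add_tail(node, &reg->head);
	spin_unlock_bh(&reg->lock);
}

static void registry_del(struct uncached_registry *reg, struct list_head *node)
{
	spin_lock_bh(&reg->lock);
	if (!list_empty(node))
		list_del_init(node);
	spin_unlock_bh(&reg->lock);
}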