author	David S. Miller <davem@davemloft.net>	2018-03-13 10:38:07 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-13 10:38:07 -0400
commit	d2ddf628e90ffb92b411757eeb8655314371b879 (patch)
tree	23450d6bb643bc348bb3199d506bbe29f77dcaa1
parent	5a9f8df68ee6927f21dd3f2c75c16feb8b53a9e8 (diff)
parent	0dcd7876029b58770f769cbb7b484e88e4a305e5 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
Steffen Klassert says:

====================
pull request (net): ipsec 2018-03-13

1) Refuse to insert 32 bit userspace socket policies on 64 bit systems,
   like we do for standard policies. We don't have a compat layer, so
   inserting socket policies from 32 bit userspace will lead to a broken
   configuration.

2) Make the policy hold queue work without the flowcache. Dummy bundles
   are not cached anymore, so we need to generate a new one on each
   lookup as long as the SAs are not yet in place.

3) Fix the validation of the esn replay attribute. The sanity check in
   verify_replay() is bypassed if the XFRM_STATE_ESN flag is not set.
   Fix this by doing the sanity check unconditionally.
   From Florian Westphal.

4) After most of the dst_entry garbage collection code was removed, we
   may leak xfrm_dst entries, as they are neither cached nor tracked
   anywhere. Fix this by reusing the 'uncached_list' to track xfrm_dst
   entries too. From Xin Long.

5) Fix a rcu_read_lock/rcu_read_unlock imbalance in xfrm_get_tos().
   From Xin Long.

6) Fix an infinite loop in xfrm_get_dst_nexthop(). In transport mode we
   fetch the child dst_entry after we continue, so this pointer is never
   updated. Fix this by fetching it before we continue.

7) Fix an ESN sequence number gap after IPsec GSO packets. We
   accidentally increment the sequence number counter on the xfrm_state
   by one packet too many in the ESN case. Fix this by setting the
   sequence number to the correct value.

8) Reset the ethernet protocol after decapsulation only if a mac header
   was set. Otherwise it breaks configurations with TUN devices.
   From Yossi Kuperman.

9) Fix __this_cpu_read() usage in preemptible code. Use this_cpu_read()
   instead in ipcomp_alloc_tfms(). From Greg Hackmann.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
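The loop bug in item 6 is easy to misread. Below is a minimal sketch of
the pattern, as a hypothetical standalone userspace analogy rather than
the kernel code (the real change is in the net/xfrm/xfrm_policy.c hunk
further down): if the cursor only advances after the mode check, a
transport-mode entry hits "continue" before the advance and the loop
never terminates.

/*
 * Hypothetical userspace sketch of the bug fixed in item 6; this is
 * not kernel code.  "transport_mode" stands in for XFRM_MODE_TRANSPORT
 * and "child" for xfrm_dst_child().
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dst {
	bool transport_mode;
	struct fake_dst *child;
};

static void walk(struct fake_dst *dst)
{
	while (dst) {
		struct fake_dst *cur = dst;

		dst = dst->child;	/* advance first, as the fixed code does */
		if (cur->transport_mode)
			continue;	/* safe: the cursor has already moved on */
		printf("handled tunnel-mode entry\n");
	}
}

int main(void)
{
	struct fake_dst inner = { .transport_mode = false, .child = NULL };
	struct fake_dst outer = { .transport_mode = true,  .child = &inner };

	/* With the old order (advance after "continue"), the transport-mode
	 * entry would be re-tested forever and this call would never return. */
	walk(&outer);
	return 0;
}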
-rw-r--r--	include/net/ip6_route.h		 3
-rw-r--r--	include/net/route.h		 3
-rw-r--r--	net/ipv4/route.c		21
-rw-r--r--	net/ipv4/xfrm4_mode_tunnel.c	 3
-rw-r--r--	net/ipv4/xfrm4_policy.c		 4
-rw-r--r--	net/ipv6/route.c		 4
-rw-r--r--	net/ipv6/xfrm6_mode_tunnel.c	 3
-rw-r--r--	net/ipv6/xfrm6_policy.c		 5
-rw-r--r--	net/xfrm/xfrm_ipcomp.c		 2
-rw-r--r--	net/xfrm/xfrm_policy.c		13
-rw-r--r--	net/xfrm/xfrm_replay.c		 2
-rw-r--r--	net/xfrm/xfrm_state.c		 5
-rw-r--r--	net/xfrm/xfrm_user.c		21
13 files changed, 56 insertions, 33 deletions
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 27d23a65f3cd..ac0866bb9e93 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
 void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
 void rt6_multipath_rebalance(struct rt6_info *rt);
 
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
 	const struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/route.h b/include/net/route.h
index 1eb9ce470e25..40b870d58f38 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -227,6 +227,9 @@ struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
 void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
 static inline void ip_rt_put(struct rtable *rt)
 {
 	/* dst_release() accepts a NULL parameter.
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 860b3fd2f54b..f9dbb8cb66bf 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1393,7 +1393,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
-static void rt_add_uncached_list(struct rtable *rt)
+void rt_add_uncached_list(struct rtable *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
 
@@ -1404,14 +1404,8 @@ static void rt_add_uncached_list(struct rtable *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void ipv4_dst_destroy(struct dst_entry *dst)
+void rt_del_uncached_list(struct rtable *rt)
 {
-	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
-		kfree(p);
-
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1421,6 +1415,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 	}
 }
 
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+	struct rtable *rt = (struct rtable *)dst;
+
+	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+		kfree(p);
+
+	rt_del_uncached_list(rt);
+}
+
 void rt_flush_dev(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 63faeee989a9..2a9764bd1719 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 05017e2c849c..8d33f7b311f4 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -102,6 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
 	xdst->u.rt.rt_table_id = rt->rt_table_id;
 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
+	rt_add_uncached_list(&xdst->u.rt);
 
 	return 0;
 }
@@ -241,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
 	dst_destroy_metrics_generic(dst);
-
+	if (xdst->u.rt.rt_uncached_list)
+		rt_del_uncached_list(&xdst->u.rt);
 	xfrm_dst_destroy(xdst);
 }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0db4218c9186..b3c1137ad0b8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
 
-static void rt6_uncached_list_add(struct rt6_info *rt)
+void rt6_uncached_list_add(struct rt6_info *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void rt6_uncached_list_del(struct rt6_info *rt)
+void rt6_uncached_list_del(struct rt6_info *rt)
 {
 	if (!list_empty(&rt->rt6i_uncached)) {
 		struct uncached_list *ul = rt->rt6i_uncached_list;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7fea..de1b0b8c53b0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb_reset_network_header(skb);
 	skb_mac_header_rebuild(skb);
-	eth_hdr(skb)->h_proto = skb->protocol;
+	if (skb->mac_len)
+		eth_hdr(skb)->h_proto = skb->protocol;
 
 	err = 0;
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 09fb44ee3b45..416fe67271a9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
 	xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
 	xdst->u.rt6.rt6i_src = rt->rt6i_src;
+	INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
+	rt6_uncached_list_add(&xdst->u.rt6);
+	atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
 
 	return 0;
 }
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
 	dst_destroy_metrics_generic(dst);
+	if (xdst->u.rt6.rt6i_uncached_list)
+		rt6_uncached_list_del(&xdst->u.rt6);
 	xfrm_dst_destroy(xdst);
 }
 
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
 	struct crypto_comp *tfm;
 
 	/* This can be any valid CPU ID so we don't need locking. */
-	tfm = __this_cpu_read(*pos->tfms);
+	tfm = this_cpu_read(*pos->tfms);
 
 	if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 		pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7a23078132cf..625b3fca5704 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
 static int xfrm_get_tos(const struct flowi *fl, int family)
 {
 	const struct xfrm_policy_afinfo *afinfo;
-	int tos = 0;
+	int tos;
 
 	afinfo = xfrm_policy_get_afinfo(family);
-	tos = afinfo ? afinfo->get_tos(fl) : 0;
+	if (!afinfo)
+		return 0;
+
+	tos = afinfo->get_tos(fl);
 
 	rcu_read_unlock();
 
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(xfrm_dst_path(dst));
-	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
+	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 	while (dst->xfrm) {
 		const struct xfrm_state *xfrm = dst->xfrm;
 
+		dst = xfrm_dst_child(dst);
+
 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
 			continue;
 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
 			daddr = xfrm->coaddr;
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
-
-		dst = xfrm_dst_child(dst);
 	}
 	return daddr;
 }
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 1d38c6acf8af..9e3a5e85f828 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 	} else {
 		XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
 		XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
-		xo->seq.low = oseq = oseq + 1;
+		xo->seq.low = oseq + 1;
 		xo->seq.hi = oseq_hi;
 		oseq += skb_shinfo(skb)->gso_segs;
 	}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54e21f19d722..f9d2f2233f09 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall())
+		return -EOPNOTSUPP;
+#endif
+
 	if (!optval && !optlen) {
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7f52b8eb177d..080035f056d9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 	struct xfrm_replay_state_esn *rs;
 
-	if (p->flags & XFRM_STATE_ESN) {
-		if (!rt)
-			return -EINVAL;
+	if (!rt)
+		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
 
-		rs = nla_data(rt);
+	rs = nla_data(rt);
 
-		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
-			return -EINVAL;
-
-		if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
-		    nla_len(rt) != sizeof(*rs))
-			return -EINVAL;
-	}
+	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+		return -EINVAL;
 
-	if (!rt)
-		return 0;
+	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
+	    nla_len(rt) != sizeof(*rs))
+		return -EINVAL;
 
 	/* As only ESP and AH support ESN feature. */
 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))