author     David S. Miller <davem@davemloft.net>   2016-12-03 11:46:54 -0500
committer  David S. Miller <davem@davemloft.net>   2016-12-03 12:29:53 -0500
commit     2745529ac7358fdac72e6b388da2e934bd9da82c (patch)
tree       245bb05b1a18189c5a5212db914c70a636d8267a /net/l2tp/l2tp_ip.c
parent     ab17cb1fea82b346bdecd4f2d7f0e84e80f847af (diff)
parent     8dc0f265d39a3933f4c1f846c7c694f12a2ab88a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Couple of conflicts resolved here:

1) In the MACB driver, a bug fix to properly initialize the RX tail
   pointer overlapped with some changes to support variable sized
   rings.

2) In XGBE we had a "CONFIG_PM" --> "CONFIG_PM_SLEEP" fix overlapping
   with a reorganization of the driver to support ACPI, OF, as well as
   PCI variants of the chip.

3) In 'net' we had several probe error path bug fixes to the stmmac
   driver, while a lot of this code was cleaned up and reorganized in
   'net-next'.

4) The cls_flower classifier obtained a helper function in 'net-next'
   called __fl_delete() and this overlapped with Daniel Borkmann's bug
   fix to use RCU for object destruction in 'net'. It also overlapped
   with Jiri's change to guard the rhashtable_remove_fast() call with
   a check against tc_skip_sw().

5) In mlx4, a revert bug fix in 'net' overlapped with some unrelated
   changes in 'net-next'.

6) In geneve, a stale header pointer after pskb_expand_head() bug fix
   in 'net' overlapped with a large reorganization of the same code in
   'net-next'. Since the 'net-next' code no longer had the bug in
   question, there was nothing to do other than to simply take the
   'net-next' hunks.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/l2tp/l2tp_ip.c')
-rw-r--r--  net/l2tp/l2tp_ip.c  63
1 file changed, 34 insertions(+), 29 deletions(-)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 982f6c44ea01..8938b6ba57a0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -61,7 +61,8 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
-		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+		    (!sk->sk_bound_dev_if || !dif ||
+		     sk->sk_bound_dev_if == dif))
 			goto found;
 	}
 
@@ -182,15 +183,17 @@ pass_up:
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+		sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
+					   tunnel_id);
+		if (!sk) {
+			read_unlock_bh(&l2tp_ip_lock);
+			goto discard;
+		}
+
+		sock_hold(sk);
 		read_unlock_bh(&l2tp_ip_lock);
 	}
 
-	if (sk == NULL)
-		goto discard;
-
-	sock_hold(sk);
-
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
 
@@ -256,15 +259,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (addr->l2tp_family != AF_INET)
 		return -EINVAL;
 
-	ret = -EADDRINUSE;
-	read_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
-				  sk->sk_bound_dev_if, addr->l2tp_conn_id))
-		goto out_in_use;
-
-	read_unlock_bh(&l2tp_ip_lock);
-
 	lock_sock(sk);
+
+	ret = -EINVAL;
 	if (!sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 
@@ -281,14 +278,22 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
-	sk_dst_reset(sk);
 
+	write_lock_bh(&l2tp_ip_lock);
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+		write_unlock_bh(&l2tp_ip_lock);
+		ret = -EADDRINUSE;
+		goto out;
+	}
+
+	sk_dst_reset(sk);
 	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
 
-	write_lock_bh(&l2tp_ip_lock);
 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
 	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip_lock);
+
 	ret = 0;
 	sock_reset_flag(sk, SOCK_ZAPPED);
 
@@ -296,11 +301,6 @@ out:
 	release_sock(sk);
 
 	return ret;
-
-out_in_use:
-	read_unlock_bh(&l2tp_ip_lock);
-
-	return ret;
 }
 
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -308,21 +308,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
 	int rc;
 
-	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
-		return -EINVAL;
-
 	if (addr_len < sizeof(*lsa))
 		return -EINVAL;
 
 	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
 		return -EINVAL;
 
-	rc = ip4_datagram_connect(sk, uaddr, addr_len);
-	if (rc < 0)
-		return rc;
-
 	lock_sock(sk);
 
+	/* Must bind first - autobinding does not work */
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		rc = -EINVAL;
+		goto out_sk;
+	}
+
+	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+	if (rc < 0)
+		goto out_sk;
+
 	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
 	write_lock_bh(&l2tp_ip_lock);
@@ -330,7 +333,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
 	write_unlock_bh(&l2tp_ip_lock);
 
+out_sk:
 	release_sock(sk);
+
 	return rc;
 }
 