author     David S. Miller <davem@davemloft.net>   2013-12-19 18:37:49 -0500
committer  David S. Miller <davem@davemloft.net>   2013-12-19 18:37:49 -0500
commit     1669cb9855050fe9d2a13391846f9aceccf42559
tree       80a2f1229902e9db7fd1552ee770372b351f2036 /net
parent     cb4eae3d525abbe408e7e0efd7841b5c3c13cd0f
parent     b3c6efbc36e2c5ac820b1a800ac17cc3e040de0c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
Steffen Klassert says:
====================
pull request (net-next): ipsec-next 2013-12-19
1) Use the user supplied policy index instead of a generated one
if present (sketched below). From Fan Du.
2) Make xfrm migration namespace aware. From Fan Du.
3) Make the xfrm state and policy locks namespace aware. From Fan Du.
4) Remove the ancient code that slept when the SA was in acquire state;
we now queue packets to the policy instead, which replaces the
sleeping code.
5) Remove FLOWI_FLAG_CAN_SLEEP. This was used to notify xfrm about the
possibility to sleep. The sleeping code is gone, so remove the flag
as well (sketched below).
6) Check the user specified spi for IPComp. The spi for IPComp is only
16 bits wide, so check for a valid value (sketched below). From Fan Du.
7) Export verify_userspi_info to check for valid user supplied spi ranges
with pfkey and netlink. From Fan Du.
8) RFC3173 states that if the total size of a compressed payload and the IPComp
header is not smaller than the size of the original payload, the IP datagram
must be sent in the original non-compressed form. These packets are dropped
by the inbound policy check because they are not transformed. Document the need
to set 'level use' for IPComp to receive such packets anyway. From Fan Du.
Please pull or let me know if there are problems.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
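
Items 4 and 5 mean that, throughout the diff below, callers of ip_route_connect(), ip6_dst_lookup_flow() and ip6_sk_dst_lookup_flow() simply drop the trailing can_sleep argument (or stop ORing in FLOWI_FLAG_CAN_SLEEP). A rough sketch of a caller after the change, using the post-patch signature visible in the net/ipv6/ip6_output.c hunks; connect_route6() is a made-up wrapper, not a function from the tree:

/* Sketch only: route lookup for a connected IPv6 socket after this
 * series.  The former "bool can_sleep" argument, which set
 * FLOWI_FLAG_CAN_SLEEP, is gone -- packets hitting a larval SA are
 * queued on the policy instead of sleeping in the lookup path.
 */
#include <linux/err.h>
#include <net/dst.h>
#include <net/ipv6.h>		/* ip6_dst_lookup_flow() */

static struct dst_entry *connect_route6(struct sock *sk, struct flowi6 *fl6,
					const struct in6_addr *final_p)
{
	struct dst_entry *dst = ip6_dst_lookup_flow(sk, fl6, final_p);

	if (IS_ERR(dst))
		return NULL;	/* real callers propagate PTR_ERR(dst) */
	return dst;
}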
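
Items 6 and 7 show up below as a verify_spi_info(x->id.proto, min_spi, max_spi) call added to pfkey_getspi() in net/key/af_key.c; the matching netlink check lives in net/xfrm/xfrm_user.c (listed in the diffstat, not shown in full here). The sketch below only restates the rule from the changelog -- an IPComp CPI is a 16-bit value, and an inverted range is never valid -- and is not a verbatim copy of the exported helper; spi_range_ok() is a made-up name:

#include <linux/errno.h>
#include <linux/in.h>		/* IPPROTO_AH, IPPROTO_ESP, IPPROTO_COMP */
#include <linux/types.h>

/* Illustrative range check in the spirit of the changelog: IPComp
 * carries its CPI in 16 bits, so a range reaching 0x10000 can never
 * be satisfied; min > max is invalid for every protocol.
 */
static int spi_range_ok(u8 proto, u32 min, u32 max)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;			/* full 32-bit SPI space */
	case IPPROTO_COMP:
		if (max >= 0x10000)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return min > max ? -EINVAL : 0;
}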
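
Items 1-3 account for most of the churn in net/xfrm/xfrm_policy.c and net/xfrm/xfrm_state.c below: the global xfrm_cfg_mutex, xfrm_policy_lock, xfrm_policy_sk_bundle_lock and xfrm_state_lock move into struct netns_xfrm and are initialised per namespace in xfrm_net_init(), and xfrm_gen_index() gains an index argument so a user supplied policy index is honoured when non-zero. The sketch below paraphrases those hunks rather than reproducing them; init_xfrm_netns_locks() and pick_policy_index() are made-up names:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>

/* Per-namespace locking: each struct net now carries its own xfrm
 * locks, set up when the namespace is created (compare the
 * xfrm_net_init() hunk below).
 */
static void init_xfrm_netns_locks(struct net *net)
{
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	rwlock_init(&net->xfrm.xfrm_policy_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
}

/* User supplied policy index: a non-zero index from userspace is used
 * as-is; only a zero index falls back to the generator, and index 0
 * itself stays reserved (compare the xfrm_gen_index() hunk below).
 */
static u32 pick_policy_index(u32 *idx_generator, int dir, u32 index)
{
	u32 idx;

	if (!index) {
		idx = (*idx_generator | dir);
		*idx_generator += 8;
	} else {
		idx = index;
	}

	return idx ? idx : 8;
}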
Diffstat (limited to 'net')
-rw-r--r--  net/dccp/ipv4.c                   |   2
-rw-r--r--  net/dccp/ipv6.c                   |   8
-rw-r--r--  net/decnet/dn_route.c             |   2
-rw-r--r--  net/ipv4/af_inet.c                |   2
-rw-r--r--  net/ipv4/datagram.c               |   2
-rw-r--r--  net/ipv4/raw.c                    |   2
-rw-r--r--  net/ipv4/tcp_ipv4.c               |   2
-rw-r--r--  net/ipv4/udp.c                    |   2
-rw-r--r--  net/ipv6/af_inet6.c               |   2
-rw-r--r--  net/ipv6/datagram.c               |   2
-rw-r--r--  net/ipv6/inet6_connection_sock.c  |   4
-rw-r--r--  net/ipv6/ip6_output.c             |  12
-rw-r--r--  net/ipv6/ping.c                   |   2
-rw-r--r--  net/ipv6/raw.c                    |   2
-rw-r--r--  net/ipv6/syncookies.c             |   2
-rw-r--r--  net/ipv6/tcp_ipv6.c               |   4
-rw-r--r--  net/ipv6/udp.c                    |   2
-rw-r--r--  net/key/af_key.c                  |  29
-rw-r--r--  net/l2tp/l2tp_ip6.c               |   2
-rw-r--r--  net/sctp/ipv6.c                   |   4
-rw-r--r--  net/xfrm/xfrm_policy.c            | 151
-rw-r--r--  net/xfrm/xfrm_state.c             | 149
-rw-r--r--  net/xfrm/xfrm_user.c              |  56
23 files changed, 222 insertions(+), 223 deletions(-)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9f65fc66db5..88299c29101d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -75,7 +75,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
75 | rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, | 75 | rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, |
76 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 76 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
77 | IPPROTO_DCCP, | 77 | IPPROTO_DCCP, |
78 | orig_sport, orig_dport, sk, true); | 78 | orig_sport, orig_dport, sk); |
79 | if (IS_ERR(rt)) | 79 | if (IS_ERR(rt)) |
80 | return PTR_ERR(rt); | 80 | return PTR_ERR(rt); |
81 | 81 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 629019e6f8e9..4db3c2a1679c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -240,7 +240,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) | |||
240 | 240 | ||
241 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 241 | final_p = fl6_update_dst(&fl6, np->opt, &final); |
242 | 242 | ||
243 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); | 243 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
244 | if (IS_ERR(dst)) { | 244 | if (IS_ERR(dst)) { |
245 | err = PTR_ERR(dst); | 245 | err = PTR_ERR(dst); |
246 | dst = NULL; | 246 | dst = NULL; |
@@ -304,7 +304,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
304 | security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); | 304 | security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); |
305 | 305 | ||
306 | /* sk = NULL, but it is safe for now. RST socket required. */ | 306 | /* sk = NULL, but it is safe for now. RST socket required. */ |
307 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); | 307 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); |
308 | if (!IS_ERR(dst)) { | 308 | if (!IS_ERR(dst)) { |
309 | skb_dst_set(skb, dst); | 309 | skb_dst_set(skb, dst); |
310 | ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); | 310 | ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); |
@@ -515,7 +515,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
515 | fl6.fl6_sport = htons(ireq->ir_num); | 515 | fl6.fl6_sport = htons(ireq->ir_num); |
516 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 516 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
517 | 517 | ||
518 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); | 518 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
519 | if (IS_ERR(dst)) | 519 | if (IS_ERR(dst)) |
520 | goto out; | 520 | goto out; |
521 | } | 521 | } |
@@ -934,7 +934,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
934 | 934 | ||
935 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 935 | final_p = fl6_update_dst(&fl6, np->opt, &final); |
936 | 936 | ||
937 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); | 937 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
938 | if (IS_ERR(dst)) { | 938 | if (IS_ERR(dst)) { |
939 | err = PTR_ERR(dst); | 939 | err = PTR_ERR(dst); |
940 | goto failure; | 940 | goto failure; |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index fe32388ea24f..ad2efa5b861b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1288,8 +1288,6 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, stru | |||
1288 | 1288 | ||
1289 | err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); | 1289 | err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); |
1290 | if (err == 0 && fl->flowidn_proto) { | 1290 | if (err == 0 && fl->flowidn_proto) { |
1291 | if (!(flags & MSG_DONTWAIT)) | ||
1292 | fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP; | ||
1293 | *pprt = xfrm_lookup(&init_net, *pprt, | 1291 | *pprt = xfrm_lookup(&init_net, *pprt, |
1294 | flowidn_to_flowi(fl), sk, 0); | 1292 | flowidn_to_flowi(fl), sk, 0); |
1295 | if (IS_ERR(*pprt)) { | 1293 | if (IS_ERR(*pprt)) { |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6b1193e63911..b8bc1a3d5cf1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1130,7 +1130,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1130 | fl4 = &inet->cork.fl.u.ip4; | 1130 | fl4 = &inet->cork.fl.u.ip4; |
1131 | rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), | 1131 | rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), |
1132 | sk->sk_bound_dev_if, sk->sk_protocol, | 1132 | sk->sk_bound_dev_if, sk->sk_protocol, |
1133 | inet->inet_sport, inet->inet_dport, sk, false); | 1133 | inet->inet_sport, inet->inet_dport, sk); |
1134 | if (IS_ERR(rt)) | 1134 | if (IS_ERR(rt)) |
1135 | return PTR_ERR(rt); | 1135 | return PTR_ERR(rt); |
1136 | 1136 | ||
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 19e36376d2a0..8b5134c582f1 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -53,7 +53,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
53 | rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, | 53 | rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, |
54 | RT_CONN_FLAGS(sk), oif, | 54 | RT_CONN_FLAGS(sk), oif, |
55 | sk->sk_protocol, | 55 | sk->sk_protocol, |
56 | inet->inet_sport, usin->sin_port, sk, true); | 56 | inet->inet_sport, usin->sin_port, sk); |
57 | if (IS_ERR(rt)) { | 57 | if (IS_ERR(rt)) { |
58 | err = PTR_ERR(rt); | 58 | err = PTR_ERR(rt); |
59 | if (err == -ENETUNREACH) | 59 | if (err == -ENETUNREACH) |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 23c3e5b5bb53..81e6cfd5a365 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -575,7 +575,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
575 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 575 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
576 | RT_SCOPE_UNIVERSE, | 576 | RT_SCOPE_UNIVERSE, |
577 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 577 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
578 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP | | 578 | inet_sk_flowi_flags(sk) | |
579 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), | 579 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), |
580 | daddr, saddr, 0, 0); | 580 | daddr, saddr, 0, 0); |
581 | 581 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 067213924751..bbaf8cb45eb2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -173,7 +173,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
173 | rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, | 173 | rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, |
174 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 174 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
175 | IPPROTO_TCP, | 175 | IPPROTO_TCP, |
176 | orig_sport, orig_dport, sk, true); | 176 | orig_sport, orig_dport, sk); |
177 | if (IS_ERR(rt)) { | 177 | if (IS_ERR(rt)) { |
178 | err = PTR_ERR(rt); | 178 | err = PTR_ERR(rt); |
179 | if (err == -ENETUNREACH) | 179 | if (err == -ENETUNREACH) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f140048334ce..d5d24ecde6a5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -986,7 +986,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
986 | fl4 = &fl4_stack; | 986 | fl4 = &fl4_stack; |
987 | flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, | 987 | flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, |
988 | RT_SCOPE_UNIVERSE, sk->sk_protocol, | 988 | RT_SCOPE_UNIVERSE, sk->sk_protocol, |
989 | inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP, | 989 | inet_sk_flowi_flags(sk), |
990 | faddr, saddr, dport, inet->inet_sport); | 990 | faddr, saddr, dport, inet->inet_sport); |
991 | 991 | ||
992 | security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); | 992 | security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 5e76dfa765c4..c921d5d38831 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -661,7 +661,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
661 | 661 | ||
662 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 662 | final_p = fl6_update_dst(&fl6, np->opt, &final); |
663 | 663 | ||
664 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); | 664 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
665 | if (IS_ERR(dst)) { | 665 | if (IS_ERR(dst)) { |
666 | sk->sk_route_caps = 0; | 666 | sk->sk_route_caps = 0; |
667 | sk->sk_err_soft = -PTR_ERR(dst); | 667 | sk->sk_err_soft = -PTR_ERR(dst); |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 93b1aa34c432..6983058942ea 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -170,7 +170,7 @@ ipv4_connected: | |||
170 | opt = flowlabel ? flowlabel->opt : np->opt; | 170 | opt = flowlabel ? flowlabel->opt : np->opt; |
171 | final_p = fl6_update_dst(&fl6, opt, &final); | 171 | final_p = fl6_update_dst(&fl6, opt, &final); |
172 | 172 | ||
173 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); | 173 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
174 | err = 0; | 174 | err = 0; |
175 | if (IS_ERR(dst)) { | 175 | if (IS_ERR(dst)) { |
176 | err = PTR_ERR(dst); | 176 | err = PTR_ERR(dst); |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 77bb8afb141d..c9138189415a 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -86,7 +86,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, | |||
86 | fl6->fl6_sport = htons(ireq->ir_num); | 86 | fl6->fl6_sport = htons(ireq->ir_num); |
87 | security_req_classify_flow(req, flowi6_to_flowi(fl6)); | 87 | security_req_classify_flow(req, flowi6_to_flowi(fl6)); |
88 | 88 | ||
89 | dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); | 89 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
90 | if (IS_ERR(dst)) | 90 | if (IS_ERR(dst)) |
91 | return NULL; | 91 | return NULL; |
92 | 92 | ||
@@ -216,7 +216,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, | |||
216 | 216 | ||
217 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); | 217 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); |
218 | if (!dst) { | 218 | if (!dst) { |
219 | dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); | 219 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
220 | 220 | ||
221 | if (!IS_ERR(dst)) | 221 | if (!IS_ERR(dst)) |
222 | __inet6_csk_dst_store(sk, dst, NULL, NULL); | 222 | __inet6_csk_dst_store(sk, dst, NULL, NULL); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bc4e1bcdf4c0..788c01a53593 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -941,7 +941,6 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); | |||
941 | * @sk: socket which provides route info | 941 | * @sk: socket which provides route info |
942 | * @fl6: flow to lookup | 942 | * @fl6: flow to lookup |
943 | * @final_dst: final destination address for ipsec lookup | 943 | * @final_dst: final destination address for ipsec lookup |
944 | * @can_sleep: we are in a sleepable context | ||
945 | * | 944 | * |
946 | * This function performs a route lookup on the given flow. | 945 | * This function performs a route lookup on the given flow. |
947 | * | 946 | * |
@@ -949,8 +948,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); | |||
949 | * error code. | 948 | * error code. |
950 | */ | 949 | */ |
951 | struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | 950 | struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
952 | const struct in6_addr *final_dst, | 951 | const struct in6_addr *final_dst) |
953 | bool can_sleep) | ||
954 | { | 952 | { |
955 | struct dst_entry *dst = NULL; | 953 | struct dst_entry *dst = NULL; |
956 | int err; | 954 | int err; |
@@ -960,8 +958,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
960 | return ERR_PTR(err); | 958 | return ERR_PTR(err); |
961 | if (final_dst) | 959 | if (final_dst) |
962 | fl6->daddr = *final_dst; | 960 | fl6->daddr = *final_dst; |
963 | if (can_sleep) | ||
964 | fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; | ||
965 | 961 | ||
966 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 962 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
967 | } | 963 | } |
@@ -972,7 +968,6 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); | |||
972 | * @sk: socket which provides the dst cache and route info | 968 | * @sk: socket which provides the dst cache and route info |
973 | * @fl6: flow to lookup | 969 | * @fl6: flow to lookup |
974 | * @final_dst: final destination address for ipsec lookup | 970 | * @final_dst: final destination address for ipsec lookup |
975 | * @can_sleep: we are in a sleepable context | ||
976 | * | 971 | * |
977 | * This function performs a route lookup on the given flow with the | 972 | * This function performs a route lookup on the given flow with the |
978 | * possibility of using the cached route in the socket if it is valid. | 973 | * possibility of using the cached route in the socket if it is valid. |
@@ -983,8 +978,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); | |||
983 | * error code. | 978 | * error code. |
984 | */ | 979 | */ |
985 | struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | 980 | struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
986 | const struct in6_addr *final_dst, | 981 | const struct in6_addr *final_dst) |
987 | bool can_sleep) | ||
988 | { | 982 | { |
989 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); | 983 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); |
990 | int err; | 984 | int err; |
@@ -996,8 +990,6 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
996 | return ERR_PTR(err); | 990 | return ERR_PTR(err); |
997 | if (final_dst) | 991 | if (final_dst) |
998 | fl6->daddr = *final_dst; | 992 | fl6->daddr = *final_dst; |
999 | if (can_sleep) | ||
1000 | fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; | ||
1001 | 993 | ||
1002 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | 994 | return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1003 | } | 995 | } |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index a83243c3d656..15d23b8c2129 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -145,7 +145,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
145 | else if (!fl6.flowi6_oif) | 145 | else if (!fl6.flowi6_oif) |
146 | fl6.flowi6_oif = np->ucast_oif; | 146 | fl6.flowi6_oif = np->ucast_oif; |
147 | 147 | ||
148 | dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1); | 148 | dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr); |
149 | if (IS_ERR(dst)) | 149 | if (IS_ERR(dst)) |
150 | return PTR_ERR(dst); | 150 | return PTR_ERR(dst); |
151 | rt = (struct rt6_info *) dst; | 151 | rt = (struct rt6_info *) dst; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index b6bb87e55805..5f10b7ea7ccc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -864,7 +864,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
864 | fl6.flowi6_oif = np->ucast_oif; | 864 | fl6.flowi6_oif = np->ucast_oif; |
865 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 865 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
866 | 866 | ||
867 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); | 867 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
868 | if (IS_ERR(dst)) { | 868 | if (IS_ERR(dst)) { |
869 | err = PTR_ERR(dst); | 869 | err = PTR_ERR(dst); |
870 | goto out; | 870 | goto out; |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 535a3ad262f1..bb53a5e73c1a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -247,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
247 | fl6.fl6_sport = inet_sk(sk)->inet_sport; | 247 | fl6.fl6_sport = inet_sk(sk)->inet_sport; |
248 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); | 248 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); |
249 | 249 | ||
250 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); | 250 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
251 | if (IS_ERR(dst)) | 251 | if (IS_ERR(dst)) |
252 | goto out_free; | 252 | goto out_free; |
253 | } | 253 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d955487f2c54..2bb87b852125 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -257,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
257 | 257 | ||
258 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 258 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
259 | 259 | ||
260 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); | 260 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
261 | if (IS_ERR(dst)) { | 261 | if (IS_ERR(dst)) { |
262 | err = PTR_ERR(dst); | 262 | err = PTR_ERR(dst); |
263 | goto failure; | 263 | goto failure; |
@@ -803,7 +803,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
803 | * Underlying function will use this to retrieve the network | 803 | * Underlying function will use this to retrieve the network |
804 | * namespace | 804 | * namespace |
805 | */ | 805 | */ |
806 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); | 806 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); |
807 | if (!IS_ERR(dst)) { | 807 | if (!IS_ERR(dst)) { |
808 | skb_dst_set(buff, dst); | 808 | skb_dst_set(buff, dst); |
809 | ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); | 809 | ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 65ed5cd79264..fa9d988f4012 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1223,7 +1223,7 @@ do_udp_sendmsg: | |||
1223 | 1223 | ||
1224 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 1224 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
1225 | 1225 | ||
1226 | dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true); | 1226 | dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); |
1227 | if (IS_ERR(dst)) { | 1227 | if (IS_ERR(dst)) { |
1228 | err = PTR_ERR(dst); | 1228 | err = PTR_ERR(dst); |
1229 | dst = NULL; | 1229 | dst = NULL; |
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 545f047868ad..1a04c1329362 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1340,6 +1340,12 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
1340 | max_spi = range->sadb_spirange_max; | 1340 | max_spi = range->sadb_spirange_max; |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | err = verify_spi_info(x->id.proto, min_spi, max_spi); | ||
1344 | if (err) { | ||
1345 | xfrm_state_put(x); | ||
1346 | return err; | ||
1347 | } | ||
1348 | |||
1343 | err = xfrm_alloc_spi(x, min_spi, max_spi); | 1349 | err = xfrm_alloc_spi(x, min_spi, max_spi); |
1344 | resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x); | 1350 | resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x); |
1345 | 1351 | ||
@@ -1380,10 +1386,9 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb | |||
1380 | return 0; | 1386 | return 0; |
1381 | 1387 | ||
1382 | spin_lock_bh(&x->lock); | 1388 | spin_lock_bh(&x->lock); |
1383 | if (x->km.state == XFRM_STATE_ACQ) { | 1389 | if (x->km.state == XFRM_STATE_ACQ) |
1384 | x->km.state = XFRM_STATE_ERROR; | 1390 | x->km.state = XFRM_STATE_ERROR; |
1385 | wake_up(&net->xfrm.km_waitq); | 1391 | |
1386 | } | ||
1387 | spin_unlock_bh(&x->lock); | 1392 | spin_unlock_bh(&x->lock); |
1388 | xfrm_state_put(x); | 1393 | xfrm_state_put(x); |
1389 | return 0; | 1394 | return 0; |
@@ -1785,7 +1790,9 @@ static int pfkey_dump_sa(struct pfkey_sock *pfk) | |||
1785 | 1790 | ||
1786 | static void pfkey_dump_sa_done(struct pfkey_sock *pfk) | 1791 | static void pfkey_dump_sa_done(struct pfkey_sock *pfk) |
1787 | { | 1792 | { |
1788 | xfrm_state_walk_done(&pfk->dump.u.state); | 1793 | struct net *net = sock_net(&pfk->sk); |
1794 | |||
1795 | xfrm_state_walk_done(&pfk->dump.u.state, net); | ||
1789 | } | 1796 | } |
1790 | 1797 | ||
1791 | static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) | 1798 | static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) |
@@ -1861,7 +1868,7 @@ static u32 gen_reqid(struct net *net) | |||
1861 | reqid = IPSEC_MANUAL_REQID_MAX+1; | 1868 | reqid = IPSEC_MANUAL_REQID_MAX+1; |
1862 | xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); | 1869 | xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); |
1863 | rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); | 1870 | rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); |
1864 | xfrm_policy_walk_done(&walk); | 1871 | xfrm_policy_walk_done(&walk, net); |
1865 | if (rc != -EEXIST) | 1872 | if (rc != -EEXIST) |
1866 | return reqid; | 1873 | return reqid; |
1867 | } while (reqid != start); | 1874 | } while (reqid != start); |
@@ -2485,6 +2492,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, | |||
2485 | struct xfrm_selector sel; | 2492 | struct xfrm_selector sel; |
2486 | struct xfrm_migrate m[XFRM_MAX_DEPTH]; | 2493 | struct xfrm_migrate m[XFRM_MAX_DEPTH]; |
2487 | struct xfrm_kmaddress k; | 2494 | struct xfrm_kmaddress k; |
2495 | struct net *net = sock_net(sk); | ||
2488 | 2496 | ||
2489 | if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1], | 2497 | if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1], |
2490 | ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) || | 2498 | ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) || |
@@ -2558,7 +2566,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, | |||
2558 | } | 2566 | } |
2559 | 2567 | ||
2560 | return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, | 2568 | return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, |
2561 | kma ? &k : NULL); | 2569 | kma ? &k : NULL, net); |
2562 | 2570 | ||
2563 | out: | 2571 | out: |
2564 | return err; | 2572 | return err; |
@@ -2659,7 +2667,9 @@ static int pfkey_dump_sp(struct pfkey_sock *pfk) | |||
2659 | 2667 | ||
2660 | static void pfkey_dump_sp_done(struct pfkey_sock *pfk) | 2668 | static void pfkey_dump_sp_done(struct pfkey_sock *pfk) |
2661 | { | 2669 | { |
2662 | xfrm_policy_walk_done(&pfk->dump.u.policy); | 2670 | struct net *net = sock_net((struct sock *)pfk); |
2671 | |||
2672 | xfrm_policy_walk_done(&pfk->dump.u.policy, net); | ||
2663 | } | 2673 | } |
2664 | 2674 | ||
2665 | static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) | 2675 | static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) |
@@ -3569,6 +3579,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb, | |||
3569 | struct sk_buff *skb = NULL; | 3579 | struct sk_buff *skb = NULL; |
3570 | struct sadb_msg *hdr = NULL; | 3580 | struct sadb_msg *hdr = NULL; |
3571 | int err; | 3581 | int err; |
3582 | struct net *net = sock_net(sk); | ||
3572 | 3583 | ||
3573 | err = -EOPNOTSUPP; | 3584 | err = -EOPNOTSUPP; |
3574 | if (msg->msg_flags & MSG_OOB) | 3585 | if (msg->msg_flags & MSG_OOB) |
@@ -3591,9 +3602,9 @@ static int pfkey_sendmsg(struct kiocb *kiocb, | |||
3591 | if (!hdr) | 3602 | if (!hdr) |
3592 | goto out; | 3603 | goto out; |
3593 | 3604 | ||
3594 | mutex_lock(&xfrm_cfg_mutex); | 3605 | mutex_lock(&net->xfrm.xfrm_cfg_mutex); |
3595 | err = pfkey_process(sk, skb, hdr); | 3606 | err = pfkey_process(sk, skb, hdr); |
3596 | mutex_unlock(&xfrm_cfg_mutex); | 3607 | mutex_unlock(&net->xfrm.xfrm_cfg_mutex); |
3597 | 3608 | ||
3598 | out: | 3609 | out: |
3599 | if (err && hdr && pfkey_error(hdr, err, sk) == 0) | 3610 | if (err && hdr && pfkey_error(hdr, err, sk) == 0) |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index bb6e206ea70b..29487a8f7fa0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -597,7 +597,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
597 | 597 | ||
598 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 598 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
599 | 599 | ||
600 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); | 600 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
601 | if (IS_ERR(dst)) { | 601 | if (IS_ERR(dst)) { |
602 | err = PTR_ERR(dst); | 602 | err = PTR_ERR(dst); |
603 | goto out; | 603 | goto out; |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 32db816ffbaa..317e13eb2c56 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -262,7 +262,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
262 | } | 262 | } |
263 | 263 | ||
264 | final_p = fl6_update_dst(fl6, np->opt, &final); | 264 | final_p = fl6_update_dst(fl6, np->opt, &final); |
265 | dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); | 265 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
266 | if (!asoc || saddr) | 266 | if (!asoc || saddr) |
267 | goto out; | 267 | goto out; |
268 | 268 | ||
@@ -321,7 +321,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
321 | fl6->saddr = baddr->v6.sin6_addr; | 321 | fl6->saddr = baddr->v6.sin6_addr; |
322 | fl6->fl6_sport = baddr->v6.sin6_port; | 322 | fl6->fl6_sport = baddr->v6.sin6_port; |
323 | final_p = fl6_update_dst(fl6, np->opt, &final); | 323 | final_p = fl6_update_dst(fl6, np->opt, &final); |
324 | dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); | 324 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
325 | } | 325 | } |
326 | 326 | ||
327 | out: | 327 | out: |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9a91f7431c41..a7487f34e813 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,12 +39,7 @@ | |||
39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) | 39 | #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ)) |
40 | #define XFRM_MAX_QUEUE_LEN 100 | 40 | #define XFRM_MAX_QUEUE_LEN 100 |
41 | 41 | ||
42 | DEFINE_MUTEX(xfrm_cfg_mutex); | ||
43 | EXPORT_SYMBOL(xfrm_cfg_mutex); | ||
44 | |||
45 | static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); | ||
46 | static struct dst_entry *xfrm_policy_sk_bundles; | 42 | static struct dst_entry *xfrm_policy_sk_bundles; |
47 | static DEFINE_RWLOCK(xfrm_policy_lock); | ||
48 | 43 | ||
49 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); | 44 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); |
50 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] | 45 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] |
@@ -438,7 +433,7 @@ static void xfrm_bydst_resize(struct net *net, int dir) | |||
438 | if (!ndst) | 433 | if (!ndst) |
439 | return; | 434 | return; |
440 | 435 | ||
441 | write_lock_bh(&xfrm_policy_lock); | 436 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
442 | 437 | ||
443 | for (i = hmask; i >= 0; i--) | 438 | for (i = hmask; i >= 0; i--) |
444 | xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); | 439 | xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); |
@@ -446,7 +441,7 @@ static void xfrm_bydst_resize(struct net *net, int dir) | |||
446 | net->xfrm.policy_bydst[dir].table = ndst; | 441 | net->xfrm.policy_bydst[dir].table = ndst; |
447 | net->xfrm.policy_bydst[dir].hmask = nhashmask; | 442 | net->xfrm.policy_bydst[dir].hmask = nhashmask; |
448 | 443 | ||
449 | write_unlock_bh(&xfrm_policy_lock); | 444 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
450 | 445 | ||
451 | xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); | 446 | xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); |
452 | } | 447 | } |
@@ -463,7 +458,7 @@ static void xfrm_byidx_resize(struct net *net, int total) | |||
463 | if (!nidx) | 458 | if (!nidx) |
464 | return; | 459 | return; |
465 | 460 | ||
466 | write_lock_bh(&xfrm_policy_lock); | 461 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
467 | 462 | ||
468 | for (i = hmask; i >= 0; i--) | 463 | for (i = hmask; i >= 0; i--) |
469 | xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); | 464 | xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); |
@@ -471,7 +466,7 @@ static void xfrm_byidx_resize(struct net *net, int total) | |||
471 | net->xfrm.policy_byidx = nidx; | 466 | net->xfrm.policy_byidx = nidx; |
472 | net->xfrm.policy_idx_hmask = nhashmask; | 467 | net->xfrm.policy_idx_hmask = nhashmask; |
473 | 468 | ||
474 | write_unlock_bh(&xfrm_policy_lock); | 469 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
475 | 470 | ||
476 | xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); | 471 | xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); |
477 | } | 472 | } |
@@ -504,7 +499,7 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total) | |||
504 | 499 | ||
505 | void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) | 500 | void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) |
506 | { | 501 | { |
507 | read_lock_bh(&xfrm_policy_lock); | 502 | read_lock_bh(&net->xfrm.xfrm_policy_lock); |
508 | si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; | 503 | si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; |
509 | si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; | 504 | si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; |
510 | si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; | 505 | si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; |
@@ -513,7 +508,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) | |||
513 | si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; | 508 | si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; |
514 | si->spdhcnt = net->xfrm.policy_idx_hmask; | 509 | si->spdhcnt = net->xfrm.policy_idx_hmask; |
515 | si->spdhmcnt = xfrm_policy_hashmax; | 510 | si->spdhmcnt = xfrm_policy_hashmax; |
516 | read_unlock_bh(&xfrm_policy_lock); | 511 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
517 | } | 512 | } |
518 | EXPORT_SYMBOL(xfrm_spd_getinfo); | 513 | EXPORT_SYMBOL(xfrm_spd_getinfo); |
519 | 514 | ||
@@ -538,7 +533,7 @@ static void xfrm_hash_resize(struct work_struct *work) | |||
538 | 533 | ||
539 | /* Generate new index... KAME seems to generate them ordered by cost | 534 | /* Generate new index... KAME seems to generate them ordered by cost |
540 | * of an absolute inpredictability of ordering of rules. This will not pass. */ | 535 | * of an absolute inpredictability of ordering of rules. This will not pass. */ |
541 | static u32 xfrm_gen_index(struct net *net, int dir) | 536 | static u32 xfrm_gen_index(struct net *net, int dir, u32 index) |
542 | { | 537 | { |
543 | static u32 idx_generator; | 538 | static u32 idx_generator; |
544 | 539 | ||
@@ -548,8 +543,14 @@ static u32 xfrm_gen_index(struct net *net, int dir) | |||
548 | u32 idx; | 543 | u32 idx; |
549 | int found; | 544 | int found; |
550 | 545 | ||
551 | idx = (idx_generator | dir); | 546 | if (!index) { |
552 | idx_generator += 8; | 547 | idx = (idx_generator | dir); |
548 | idx_generator += 8; | ||
549 | } else { | ||
550 | idx = index; | ||
551 | index = 0; | ||
552 | } | ||
553 | |||
553 | if (idx == 0) | 554 | if (idx == 0) |
554 | idx = 8; | 555 | idx = 8; |
555 | list = net->xfrm.policy_byidx + idx_hash(net, idx); | 556 | list = net->xfrm.policy_byidx + idx_hash(net, idx); |
@@ -630,7 +631,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
630 | struct hlist_head *chain; | 631 | struct hlist_head *chain; |
631 | struct hlist_node *newpos; | 632 | struct hlist_node *newpos; |
632 | 633 | ||
633 | write_lock_bh(&xfrm_policy_lock); | 634 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
634 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); | 635 | chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); |
635 | delpol = NULL; | 636 | delpol = NULL; |
636 | newpos = NULL; | 637 | newpos = NULL; |
@@ -641,7 +642,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
641 | xfrm_sec_ctx_match(pol->security, policy->security) && | 642 | xfrm_sec_ctx_match(pol->security, policy->security) && |
642 | !WARN_ON(delpol)) { | 643 | !WARN_ON(delpol)) { |
643 | if (excl) { | 644 | if (excl) { |
644 | write_unlock_bh(&xfrm_policy_lock); | 645 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
645 | return -EEXIST; | 646 | return -EEXIST; |
646 | } | 647 | } |
647 | delpol = pol; | 648 | delpol = pol; |
@@ -672,14 +673,14 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
672 | xfrm_policy_requeue(delpol, policy); | 673 | xfrm_policy_requeue(delpol, policy); |
673 | __xfrm_policy_unlink(delpol, dir); | 674 | __xfrm_policy_unlink(delpol, dir); |
674 | } | 675 | } |
675 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); | 676 | policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index); |
676 | hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); | 677 | hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); |
677 | policy->curlft.add_time = get_seconds(); | 678 | policy->curlft.add_time = get_seconds(); |
678 | policy->curlft.use_time = 0; | 679 | policy->curlft.use_time = 0; |
679 | if (!mod_timer(&policy->timer, jiffies + HZ)) | 680 | if (!mod_timer(&policy->timer, jiffies + HZ)) |
680 | xfrm_pol_hold(policy); | 681 | xfrm_pol_hold(policy); |
681 | list_add(&policy->walk.all, &net->xfrm.policy_all); | 682 | list_add(&policy->walk.all, &net->xfrm.policy_all); |
682 | write_unlock_bh(&xfrm_policy_lock); | 683 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
683 | 684 | ||
684 | if (delpol) | 685 | if (delpol) |
685 | xfrm_policy_kill(delpol); | 686 | xfrm_policy_kill(delpol); |
@@ -699,7 +700,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
699 | struct hlist_head *chain; | 700 | struct hlist_head *chain; |
700 | 701 | ||
701 | *err = 0; | 702 | *err = 0; |
702 | write_lock_bh(&xfrm_policy_lock); | 703 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
703 | chain = policy_hash_bysel(net, sel, sel->family, dir); | 704 | chain = policy_hash_bysel(net, sel, sel->family, dir); |
704 | ret = NULL; | 705 | ret = NULL; |
705 | hlist_for_each_entry(pol, chain, bydst) { | 706 | hlist_for_each_entry(pol, chain, bydst) { |
@@ -712,7 +713,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
712 | *err = security_xfrm_policy_delete( | 713 | *err = security_xfrm_policy_delete( |
713 | pol->security); | 714 | pol->security); |
714 | if (*err) { | 715 | if (*err) { |
715 | write_unlock_bh(&xfrm_policy_lock); | 716 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
716 | return pol; | 717 | return pol; |
717 | } | 718 | } |
718 | __xfrm_policy_unlink(pol, dir); | 719 | __xfrm_policy_unlink(pol, dir); |
@@ -721,7 +722,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
721 | break; | 722 | break; |
722 | } | 723 | } |
723 | } | 724 | } |
724 | write_unlock_bh(&xfrm_policy_lock); | 725 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
725 | 726 | ||
726 | if (ret && delete) | 727 | if (ret && delete) |
727 | xfrm_policy_kill(ret); | 728 | xfrm_policy_kill(ret); |
@@ -740,7 +741,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
740 | return NULL; | 741 | return NULL; |
741 | 742 | ||
742 | *err = 0; | 743 | *err = 0; |
743 | write_lock_bh(&xfrm_policy_lock); | 744 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
744 | chain = net->xfrm.policy_byidx + idx_hash(net, id); | 745 | chain = net->xfrm.policy_byidx + idx_hash(net, id); |
745 | ret = NULL; | 746 | ret = NULL; |
746 | hlist_for_each_entry(pol, chain, byidx) { | 747 | hlist_for_each_entry(pol, chain, byidx) { |
@@ -751,7 +752,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
751 | *err = security_xfrm_policy_delete( | 752 | *err = security_xfrm_policy_delete( |
752 | pol->security); | 753 | pol->security); |
753 | if (*err) { | 754 | if (*err) { |
754 | write_unlock_bh(&xfrm_policy_lock); | 755 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
755 | return pol; | 756 | return pol; |
756 | } | 757 | } |
757 | __xfrm_policy_unlink(pol, dir); | 758 | __xfrm_policy_unlink(pol, dir); |
@@ -760,7 +761,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
760 | break; | 761 | break; |
761 | } | 762 | } |
762 | } | 763 | } |
763 | write_unlock_bh(&xfrm_policy_lock); | 764 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
764 | 765 | ||
765 | if (ret && delete) | 766 | if (ret && delete) |
766 | xfrm_policy_kill(ret); | 767 | xfrm_policy_kill(ret); |
@@ -823,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
823 | { | 824 | { |
824 | int dir, err = 0, cnt = 0; | 825 | int dir, err = 0, cnt = 0; |
825 | 826 | ||
826 | write_lock_bh(&xfrm_policy_lock); | 827 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
827 | 828 | ||
828 | err = xfrm_policy_flush_secctx_check(net, type, audit_info); | 829 | err = xfrm_policy_flush_secctx_check(net, type, audit_info); |
829 | if (err) | 830 | if (err) |
@@ -839,7 +840,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
839 | if (pol->type != type) | 840 | if (pol->type != type) |
840 | continue; | 841 | continue; |
841 | __xfrm_policy_unlink(pol, dir); | 842 | __xfrm_policy_unlink(pol, dir); |
842 | write_unlock_bh(&xfrm_policy_lock); | 843 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
843 | cnt++; | 844 | cnt++; |
844 | 845 | ||
845 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, | 846 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, |
@@ -848,7 +849,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
848 | 849 | ||
849 | xfrm_policy_kill(pol); | 850 | xfrm_policy_kill(pol); |
850 | 851 | ||
851 | write_lock_bh(&xfrm_policy_lock); | 852 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
852 | goto again1; | 853 | goto again1; |
853 | } | 854 | } |
854 | 855 | ||
@@ -860,7 +861,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
860 | if (pol->type != type) | 861 | if (pol->type != type) |
861 | continue; | 862 | continue; |
862 | __xfrm_policy_unlink(pol, dir); | 863 | __xfrm_policy_unlink(pol, dir); |
863 | write_unlock_bh(&xfrm_policy_lock); | 864 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
864 | cnt++; | 865 | cnt++; |
865 | 866 | ||
866 | xfrm_audit_policy_delete(pol, 1, | 867 | xfrm_audit_policy_delete(pol, 1, |
@@ -869,7 +870,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
869 | audit_info->secid); | 870 | audit_info->secid); |
870 | xfrm_policy_kill(pol); | 871 | xfrm_policy_kill(pol); |
871 | 872 | ||
872 | write_lock_bh(&xfrm_policy_lock); | 873 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
873 | goto again2; | 874 | goto again2; |
874 | } | 875 | } |
875 | } | 876 | } |
@@ -878,7 +879,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
878 | if (!cnt) | 879 | if (!cnt) |
879 | err = -ESRCH; | 880 | err = -ESRCH; |
880 | out: | 881 | out: |
881 | write_unlock_bh(&xfrm_policy_lock); | 882 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
882 | return err; | 883 | return err; |
883 | } | 884 | } |
884 | EXPORT_SYMBOL(xfrm_policy_flush); | 885 | EXPORT_SYMBOL(xfrm_policy_flush); |
@@ -898,7 +899,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, | |||
898 | if (list_empty(&walk->walk.all) && walk->seq != 0) | 899 | if (list_empty(&walk->walk.all) && walk->seq != 0) |
899 | return 0; | 900 | return 0; |
900 | 901 | ||
901 | write_lock_bh(&xfrm_policy_lock); | 902 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
902 | if (list_empty(&walk->walk.all)) | 903 | if (list_empty(&walk->walk.all)) |
903 | x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); | 904 | x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); |
904 | else | 905 | else |
@@ -924,7 +925,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, | |||
924 | } | 925 | } |
925 | list_del_init(&walk->walk.all); | 926 | list_del_init(&walk->walk.all); |
926 | out: | 927 | out: |
927 | write_unlock_bh(&xfrm_policy_lock); | 928 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
928 | return error; | 929 | return error; |
929 | } | 930 | } |
930 | EXPORT_SYMBOL(xfrm_policy_walk); | 931 | EXPORT_SYMBOL(xfrm_policy_walk); |
@@ -938,14 +939,14 @@ void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) | |||
938 | } | 939 | } |
939 | EXPORT_SYMBOL(xfrm_policy_walk_init); | 940 | EXPORT_SYMBOL(xfrm_policy_walk_init); |
940 | 941 | ||
941 | void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) | 942 | void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net) |
942 | { | 943 | { |
943 | if (list_empty(&walk->walk.all)) | 944 | if (list_empty(&walk->walk.all)) |
944 | return; | 945 | return; |
945 | 946 | ||
946 | write_lock_bh(&xfrm_policy_lock); | 947 | write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */ |
947 | list_del(&walk->walk.all); | 948 | list_del(&walk->walk.all); |
948 | write_unlock_bh(&xfrm_policy_lock); | 949 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
949 | } | 950 | } |
950 | EXPORT_SYMBOL(xfrm_policy_walk_done); | 951 | EXPORT_SYMBOL(xfrm_policy_walk_done); |
951 | 952 | ||
@@ -990,7 +991,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, | |||
990 | if (unlikely(!daddr || !saddr)) | 991 | if (unlikely(!daddr || !saddr)) |
991 | return NULL; | 992 | return NULL; |
992 | 993 | ||
993 | read_lock_bh(&xfrm_policy_lock); | 994 | read_lock_bh(&net->xfrm.xfrm_policy_lock); |
994 | chain = policy_hash_direct(net, daddr, saddr, family, dir); | 995 | chain = policy_hash_direct(net, daddr, saddr, family, dir); |
995 | ret = NULL; | 996 | ret = NULL; |
996 | hlist_for_each_entry(pol, chain, bydst) { | 997 | hlist_for_each_entry(pol, chain, bydst) { |
@@ -1026,7 +1027,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, | |||
1026 | if (ret) | 1027 | if (ret) |
1027 | xfrm_pol_hold(ret); | 1028 | xfrm_pol_hold(ret); |
1028 | fail: | 1029 | fail: |
1029 | read_unlock_bh(&xfrm_policy_lock); | 1030 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1030 | 1031 | ||
1031 | return ret; | 1032 | return ret; |
1032 | } | 1033 | } |
@@ -1103,8 +1104,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, | |||
1103 | const struct flowi *fl) | 1104 | const struct flowi *fl) |
1104 | { | 1105 | { |
1105 | struct xfrm_policy *pol; | 1106 | struct xfrm_policy *pol; |
1107 | struct net *net = sock_net(sk); | ||
1106 | 1108 | ||
1107 | read_lock_bh(&xfrm_policy_lock); | 1109 | read_lock_bh(&net->xfrm.xfrm_policy_lock); |
1108 | if ((pol = sk->sk_policy[dir]) != NULL) { | 1110 | if ((pol = sk->sk_policy[dir]) != NULL) { |
1109 | bool match = xfrm_selector_match(&pol->selector, fl, | 1111 | bool match = xfrm_selector_match(&pol->selector, fl, |
1110 | sk->sk_family); | 1112 | sk->sk_family); |
@@ -1128,7 +1130,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, | |||
1128 | pol = NULL; | 1130 | pol = NULL; |
1129 | } | 1131 | } |
1130 | out: | 1132 | out: |
1131 | read_unlock_bh(&xfrm_policy_lock); | 1133 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1132 | return pol; | 1134 | return pol; |
1133 | } | 1135 | } |
1134 | 1136 | ||
@@ -1166,9 +1168,11 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, | |||
1166 | 1168 | ||
1167 | int xfrm_policy_delete(struct xfrm_policy *pol, int dir) | 1169 | int xfrm_policy_delete(struct xfrm_policy *pol, int dir) |
1168 | { | 1170 | { |
1169 | write_lock_bh(&xfrm_policy_lock); | 1171 | struct net *net = xp_net(pol); |
1172 | |||
1173 | write_lock_bh(&net->xfrm.xfrm_policy_lock); | ||
1170 | pol = __xfrm_policy_unlink(pol, dir); | 1174 | pol = __xfrm_policy_unlink(pol, dir); |
1171 | write_unlock_bh(&xfrm_policy_lock); | 1175 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1172 | if (pol) { | 1176 | if (pol) { |
1173 | xfrm_policy_kill(pol); | 1177 | xfrm_policy_kill(pol); |
1174 | return 0; | 1178 | return 0; |
@@ -1187,12 +1191,12 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1187 | return -EINVAL; | 1191 | return -EINVAL; |
1188 | #endif | 1192 | #endif |
1189 | 1193 | ||
1190 | write_lock_bh(&xfrm_policy_lock); | 1194 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
1191 | old_pol = sk->sk_policy[dir]; | 1195 | old_pol = sk->sk_policy[dir]; |
1192 | sk->sk_policy[dir] = pol; | 1196 | sk->sk_policy[dir] = pol; |
1193 | if (pol) { | 1197 | if (pol) { |
1194 | pol->curlft.add_time = get_seconds(); | 1198 | pol->curlft.add_time = get_seconds(); |
1195 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); | 1199 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); |
1196 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); | 1200 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); |
1197 | } | 1201 | } |
1198 | if (old_pol) { | 1202 | if (old_pol) { |
@@ -1204,7 +1208,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1204 | */ | 1208 | */ |
1205 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); | 1209 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); |
1206 | } | 1210 | } |
1207 | write_unlock_bh(&xfrm_policy_lock); | 1211 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1208 | 1212 | ||
1209 | if (old_pol) { | 1213 | if (old_pol) { |
1210 | xfrm_policy_kill(old_pol); | 1214 | xfrm_policy_kill(old_pol); |
@@ -1215,6 +1219,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1215 | static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) | 1219 | static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) |
1216 | { | 1220 | { |
1217 | struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); | 1221 | struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); |
1222 | struct net *net = xp_net(old); | ||
1218 | 1223 | ||
1219 | if (newp) { | 1224 | if (newp) { |
1220 | newp->selector = old->selector; | 1225 | newp->selector = old->selector; |
@@ -1233,9 +1238,9 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) | |||
1233 | newp->type = old->type; | 1238 | newp->type = old->type; |
1234 | memcpy(newp->xfrm_vec, old->xfrm_vec, | 1239 | memcpy(newp->xfrm_vec, old->xfrm_vec, |
1235 | newp->xfrm_nr*sizeof(struct xfrm_tmpl)); | 1240 | newp->xfrm_nr*sizeof(struct xfrm_tmpl)); |
1236 | write_lock_bh(&xfrm_policy_lock); | 1241 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
1237 | __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); | 1242 | __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); |
1238 | write_unlock_bh(&xfrm_policy_lock); | 1243 | write_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1239 | xfrm_pol_put(newp); | 1244 | xfrm_pol_put(newp); |
1240 | } | 1245 | } |
1241 | return newp; | 1246 | return newp; |
@@ -1896,8 +1901,7 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, | |||
1896 | if (IS_ERR(xdst)) | 1901 | if (IS_ERR(xdst)) |
1897 | return xdst; | 1902 | return xdst; |
1898 | 1903 | ||
1899 | if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 || | 1904 | if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0) |
1900 | (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP)) | ||
1901 | return xdst; | 1905 | return xdst; |
1902 | 1906 | ||
1903 | dst1 = &xdst->u.dst; | 1907 | dst1 = &xdst->u.dst; |
@@ -2072,7 +2076,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2072 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); | 2076 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); |
2073 | int i, err, num_pols, num_xfrms = 0, drop_pols = 0; | 2077 | int i, err, num_pols, num_xfrms = 0, drop_pols = 0; |
2074 | 2078 | ||
2075 | restart: | ||
2076 | dst = NULL; | 2079 | dst = NULL; |
2077 | xdst = NULL; | 2080 | xdst = NULL; |
2078 | route = NULL; | 2081 | route = NULL; |
@@ -2106,10 +2109,10 @@ restart: | |||
2106 | 2109 | ||
2107 | dst_hold(&xdst->u.dst); | 2110 | dst_hold(&xdst->u.dst); |
2108 | 2111 | ||
2109 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | 2112 | spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock); |
2110 | xdst->u.dst.next = xfrm_policy_sk_bundles; | 2113 | xdst->u.dst.next = xfrm_policy_sk_bundles; |
2111 | xfrm_policy_sk_bundles = &xdst->u.dst; | 2114 | xfrm_policy_sk_bundles = &xdst->u.dst; |
2112 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | 2115 | spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock); |
2113 | 2116 | ||
2114 | route = xdst->route; | 2117 | route = xdst->route; |
2115 | } | 2118 | } |
@@ -2152,23 +2155,8 @@ restart: | |||
2152 | 2155 | ||
2153 | return make_blackhole(net, family, dst_orig); | 2156 | return make_blackhole(net, family, dst_orig); |
2154 | } | 2157 | } |
2155 | if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { | ||
2156 | DECLARE_WAITQUEUE(wait, current); | ||
2157 | |||
2158 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
2159 | set_current_state(TASK_INTERRUPTIBLE); | ||
2160 | schedule(); | ||
2161 | set_current_state(TASK_RUNNING); | ||
2162 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
2163 | 2158 | ||
2164 | if (!signal_pending(current)) { | 2159 | err = -EAGAIN; |
2165 | dst_release(dst); | ||
2166 | goto restart; | ||
2167 | } | ||
2168 | |||
2169 | err = -ERESTART; | ||
2170 | } else | ||
2171 | err = -EAGAIN; | ||
2172 | 2160 | ||
2173 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 2161 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
2174 | goto error; | 2162 | goto error; |
@@ -2434,7 +2422,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
2434 | } | 2422 | } |
2435 | xfrm_nr = ti; | 2423 | xfrm_nr = ti; |
2436 | if (npols > 1) { | 2424 | if (npols > 1) { |
2437 | xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); | 2425 | xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net); |
2438 | tpp = stp; | 2426 | tpp = stp; |
2439 | } | 2427 | } |
2440 | 2428 | ||
@@ -2563,10 +2551,10 @@ static void __xfrm_garbage_collect(struct net *net) | |||
2563 | { | 2551 | { |
2564 | struct dst_entry *head, *next; | 2552 | struct dst_entry *head, *next; |
2565 | 2553 | ||
2566 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | 2554 | spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock); |
2567 | head = xfrm_policy_sk_bundles; | 2555 | head = xfrm_policy_sk_bundles; |
2568 | xfrm_policy_sk_bundles = NULL; | 2556 | xfrm_policy_sk_bundles = NULL; |
2569 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | 2557 | spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock); |
2570 | 2558 | ||
2571 | while (head) { | 2559 | while (head) { |
2572 | next = head->next; | 2560 | next = head->next; |
@@ -2950,6 +2938,13 @@ static int __net_init xfrm_net_init(struct net *net) | |||
2950 | rv = xfrm_sysctl_init(net); | 2938 | rv = xfrm_sysctl_init(net); |
2951 | if (rv < 0) | 2939 | if (rv < 0) |
2952 | goto out_sysctl; | 2940 | goto out_sysctl; |
2941 | |||
2942 | /* Initialize the per-net locks here */ | ||
2943 | spin_lock_init(&net->xfrm.xfrm_state_lock); | ||
2944 | rwlock_init(&net->xfrm.xfrm_policy_lock); | ||
2945 | spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock); | ||
2946 | mutex_init(&net->xfrm.xfrm_cfg_mutex); | ||
2947 | |||
2953 | return 0; | 2948 | return 0; |
2954 | 2949 | ||
2955 | out_sysctl: | 2950 | out_sysctl: |
@@ -3070,14 +3065,14 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, | |||
3070 | } | 3065 | } |
3071 | 3066 | ||
3072 | static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, | 3067 | static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, |
3073 | u8 dir, u8 type) | 3068 | u8 dir, u8 type, struct net *net) |
3074 | { | 3069 | { |
3075 | struct xfrm_policy *pol, *ret = NULL; | 3070 | struct xfrm_policy *pol, *ret = NULL; |
3076 | struct hlist_head *chain; | 3071 | struct hlist_head *chain; |
3077 | u32 priority = ~0U; | 3072 | u32 priority = ~0U; |
3078 | 3073 | ||
3079 | read_lock_bh(&xfrm_policy_lock); | 3074 | read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/ |
3080 | chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); | 3075 | chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir); |
3081 | hlist_for_each_entry(pol, chain, bydst) { | 3076 | hlist_for_each_entry(pol, chain, bydst) { |
3082 | if (xfrm_migrate_selector_match(sel, &pol->selector) && | 3077 | if (xfrm_migrate_selector_match(sel, &pol->selector) && |
3083 | pol->type == type) { | 3078 | pol->type == type) { |
@@ -3086,7 +3081,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector | |||
3086 | break; | 3081 | break; |
3087 | } | 3082 | } |
3088 | } | 3083 | } |
3089 | chain = &init_net.xfrm.policy_inexact[dir]; | 3084 | chain = &net->xfrm.policy_inexact[dir]; |
3090 | hlist_for_each_entry(pol, chain, bydst) { | 3085 | hlist_for_each_entry(pol, chain, bydst) { |
3091 | if (xfrm_migrate_selector_match(sel, &pol->selector) && | 3086 | if (xfrm_migrate_selector_match(sel, &pol->selector) && |
3092 | pol->type == type && | 3087 | pol->type == type && |
@@ -3099,7 +3094,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector | |||
3099 | if (ret) | 3094 | if (ret) |
3100 | xfrm_pol_hold(ret); | 3095 | xfrm_pol_hold(ret); |
3101 | 3096 | ||
3102 | read_unlock_bh(&xfrm_policy_lock); | 3097 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
3103 | 3098 | ||
3104 | return ret; | 3099 | return ret; |
3105 | } | 3100 | } |
@@ -3210,7 +3205,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) | |||
3210 | 3205 | ||
3211 | int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, | 3206 | int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, |
3212 | struct xfrm_migrate *m, int num_migrate, | 3207 | struct xfrm_migrate *m, int num_migrate, |
3213 | struct xfrm_kmaddress *k) | 3208 | struct xfrm_kmaddress *k, struct net *net) |
3214 | { | 3209 | { |
3215 | int i, err, nx_cur = 0, nx_new = 0; | 3210 | int i, err, nx_cur = 0, nx_new = 0; |
3216 | struct xfrm_policy *pol = NULL; | 3211 | struct xfrm_policy *pol = NULL; |
@@ -3223,14 +3218,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, | |||
3223 | goto out; | 3218 | goto out; |
3224 | 3219 | ||
3225 | /* Stage 1 - find policy */ | 3220 | /* Stage 1 - find policy */ |
3226 | if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { | 3221 | if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { |
3227 | err = -ENOENT; | 3222 | err = -ENOENT; |
3228 | goto out; | 3223 | goto out; |
3229 | } | 3224 | } |
3230 | 3225 | ||
3231 | /* Stage 2 - find and update state(s) */ | 3226 | /* Stage 2 - find and update state(s) */ |
3232 | for (i = 0, mp = m; i < num_migrate; i++, mp++) { | 3227 | for (i = 0, mp = m; i < num_migrate; i++, mp++) { |
3233 | if ((x = xfrm_migrate_state_find(mp))) { | 3228 | if ((x = xfrm_migrate_state_find(mp, net))) { |
3234 | x_cur[nx_cur] = x; | 3229 | x_cur[nx_cur] = x; |
3235 | nx_cur++; | 3230 | nx_cur++; |
3236 | if ((xc = xfrm_state_migrate(x, mp))) { | 3231 | if ((xc = xfrm_state_migrate(x, mp))) { |
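The migrate path now threads the caller's struct net through xfrm_migrate(), xfrm_migrate_policy_find() and xfrm_migrate_state_find() instead of hard-coding init_net. A minimal user-space sketch of the same idea, searching only the table that belongs to the supplied context (all names illustrative), is:

/* Lookup helpers take the caller's namespace and consult its own table,
 * rather than always searching the initial namespace. */
#include <stdio.h>
#include <string.h>

struct ns {
        const char *name;
        const char *states[4];    /* stand-in for the per-netns state hash */
};

/* Analogue of xfrm_migrate_state_find(m, net): only 'net' is searched. */
static const char *state_find(const struct ns *net, const char *key)
{
        for (int i = 0; i < 4 && net->states[i]; i++)
                if (!strcmp(net->states[i], key))
                        return net->states[i];
        return NULL;
}

int main(void)
{
        struct ns init_net = { "init_net", { "sa-a" } };
        struct ns other    = { "other",    { "sa-b" } };

        /* Before the change the search was pinned to init_net; now it
         * follows whichever namespace the request arrived on. */
        printf("%s\n", state_find(&other, "sa-b") ?
               "found in caller's netns" : "miss");
        printf("%s\n", state_find(&init_net, "sa-b") ?
               "found" : "not visible from init_net");
        return 0;
}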
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 68c2f357a183..a62c25ea3631 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -35,8 +35,6 @@ | |||
35 | destination/tunnel endpoint. (output) | 35 | destination/tunnel endpoint. (output) |
36 | */ | 36 | */ |
37 | 37 | ||
38 | static DEFINE_SPINLOCK(xfrm_state_lock); | ||
39 | |||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 38 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | 39 | ||
42 | static inline unsigned int xfrm_dst_hash(struct net *net, | 40 | static inline unsigned int xfrm_dst_hash(struct net *net, |
@@ -127,7 +125,7 @@ static void xfrm_hash_resize(struct work_struct *work) | |||
127 | goto out_unlock; | 125 | goto out_unlock; |
128 | } | 126 | } |
129 | 127 | ||
130 | spin_lock_bh(&xfrm_state_lock); | 128 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
131 | 129 | ||
132 | nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; | 130 | nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; |
133 | for (i = net->xfrm.state_hmask; i >= 0; i--) | 131 | for (i = net->xfrm.state_hmask; i >= 0; i--) |
@@ -144,7 +142,7 @@ static void xfrm_hash_resize(struct work_struct *work) | |||
144 | net->xfrm.state_byspi = nspi; | 142 | net->xfrm.state_byspi = nspi; |
145 | net->xfrm.state_hmask = nhashmask; | 143 | net->xfrm.state_hmask = nhashmask; |
146 | 144 | ||
147 | spin_unlock_bh(&xfrm_state_lock); | 145 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
148 | 146 | ||
149 | osize = (ohashmask + 1) * sizeof(struct hlist_head); | 147 | osize = (ohashmask + 1) * sizeof(struct hlist_head); |
150 | xfrm_hash_free(odst, osize); | 148 | xfrm_hash_free(odst, osize); |
@@ -374,8 +372,6 @@ static void xfrm_state_gc_task(struct work_struct *work) | |||
374 | 372 | ||
375 | hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) | 373 | hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) |
376 | xfrm_state_gc_destroy(x); | 374 | xfrm_state_gc_destroy(x); |
377 | |||
378 | wake_up(&net->xfrm.km_waitq); | ||
379 | } | 375 | } |
380 | 376 | ||
381 | static inline unsigned long make_jiffies(long secs) | 377 | static inline unsigned long make_jiffies(long secs) |
@@ -390,7 +386,6 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) | |||
390 | { | 386 | { |
391 | struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); | 387 | struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); |
392 | struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); | 388 | struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); |
393 | struct net *net = xs_net(x); | ||
394 | unsigned long now = get_seconds(); | 389 | unsigned long now = get_seconds(); |
395 | long next = LONG_MAX; | 390 | long next = LONG_MAX; |
396 | int warn = 0; | 391 | int warn = 0; |
@@ -460,12 +455,8 @@ resched: | |||
460 | goto out; | 455 | goto out; |
461 | 456 | ||
462 | expired: | 457 | expired: |
463 | if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) { | 458 | if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) |
464 | x->km.state = XFRM_STATE_EXPIRED; | 459 | x->km.state = XFRM_STATE_EXPIRED; |
465 | wake_up(&net->xfrm.km_waitq); | ||
466 | next = 2; | ||
467 | goto resched; | ||
468 | } | ||
469 | 460 | ||
470 | err = __xfrm_state_delete(x); | 461 | err = __xfrm_state_delete(x); |
471 | if (!err) | 462 | if (!err) |
@@ -535,14 +526,14 @@ int __xfrm_state_delete(struct xfrm_state *x) | |||
535 | 526 | ||
536 | if (x->km.state != XFRM_STATE_DEAD) { | 527 | if (x->km.state != XFRM_STATE_DEAD) { |
537 | x->km.state = XFRM_STATE_DEAD; | 528 | x->km.state = XFRM_STATE_DEAD; |
538 | spin_lock(&xfrm_state_lock); | 529 | spin_lock(&net->xfrm.xfrm_state_lock); |
539 | list_del(&x->km.all); | 530 | list_del(&x->km.all); |
540 | hlist_del(&x->bydst); | 531 | hlist_del(&x->bydst); |
541 | hlist_del(&x->bysrc); | 532 | hlist_del(&x->bysrc); |
542 | if (x->id.spi) | 533 | if (x->id.spi) |
543 | hlist_del(&x->byspi); | 534 | hlist_del(&x->byspi); |
544 | net->xfrm.state_num--; | 535 | net->xfrm.state_num--; |
545 | spin_unlock(&xfrm_state_lock); | 536 | spin_unlock(&net->xfrm.xfrm_state_lock); |
546 | 537 | ||
547 | /* All xfrm_state objects are created by xfrm_state_alloc. | 538 | /* All xfrm_state objects are created by xfrm_state_alloc. |
548 | * The xfrm_state_alloc call gives a reference, and that | 539 | * The xfrm_state_alloc call gives a reference, and that |
@@ -603,7 +594,7 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) | |||
603 | { | 594 | { |
604 | int i, err = 0, cnt = 0; | 595 | int i, err = 0, cnt = 0; |
605 | 596 | ||
606 | spin_lock_bh(&xfrm_state_lock); | 597 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
607 | err = xfrm_state_flush_secctx_check(net, proto, audit_info); | 598 | err = xfrm_state_flush_secctx_check(net, proto, audit_info); |
608 | if (err) | 599 | if (err) |
609 | goto out; | 600 | goto out; |
@@ -616,7 +607,7 @@ restart: | |||
616 | if (!xfrm_state_kern(x) && | 607 | if (!xfrm_state_kern(x) && |
617 | xfrm_id_proto_match(x->id.proto, proto)) { | 608 | xfrm_id_proto_match(x->id.proto, proto)) { |
618 | xfrm_state_hold(x); | 609 | xfrm_state_hold(x); |
619 | spin_unlock_bh(&xfrm_state_lock); | 610 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
620 | 611 | ||
621 | err = xfrm_state_delete(x); | 612 | err = xfrm_state_delete(x); |
622 | xfrm_audit_state_delete(x, err ? 0 : 1, | 613 | xfrm_audit_state_delete(x, err ? 0 : 1, |
@@ -627,7 +618,7 @@ restart: | |||
627 | if (!err) | 618 | if (!err) |
628 | cnt++; | 619 | cnt++; |
629 | 620 | ||
630 | spin_lock_bh(&xfrm_state_lock); | 621 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
631 | goto restart; | 622 | goto restart; |
632 | } | 623 | } |
633 | } | 624 | } |
@@ -636,19 +627,18 @@ restart: | |||
636 | err = 0; | 627 | err = 0; |
637 | 628 | ||
638 | out: | 629 | out: |
639 | spin_unlock_bh(&xfrm_state_lock); | 630 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
640 | wake_up(&net->xfrm.km_waitq); | ||
641 | return err; | 631 | return err; |
642 | } | 632 | } |
643 | EXPORT_SYMBOL(xfrm_state_flush); | 633 | EXPORT_SYMBOL(xfrm_state_flush); |
644 | 634 | ||
645 | void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) | 635 | void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) |
646 | { | 636 | { |
647 | spin_lock_bh(&xfrm_state_lock); | 637 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
648 | si->sadcnt = net->xfrm.state_num; | 638 | si->sadcnt = net->xfrm.state_num; |
649 | si->sadhcnt = net->xfrm.state_hmask; | 639 | si->sadhcnt = net->xfrm.state_hmask; |
650 | si->sadhmcnt = xfrm_state_hashmax; | 640 | si->sadhmcnt = xfrm_state_hashmax; |
651 | spin_unlock_bh(&xfrm_state_lock); | 641 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
652 | } | 642 | } |
653 | EXPORT_SYMBOL(xfrm_sad_getinfo); | 643 | EXPORT_SYMBOL(xfrm_sad_getinfo); |
654 | 644 | ||
@@ -801,7 +791,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, | |||
801 | 791 | ||
802 | to_put = NULL; | 792 | to_put = NULL; |
803 | 793 | ||
804 | spin_lock_bh(&xfrm_state_lock); | 794 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
805 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); | 795 | h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); |
806 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { | 796 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
807 | if (x->props.family == encap_family && | 797 | if (x->props.family == encap_family && |
@@ -886,7 +876,7 @@ out: | |||
886 | xfrm_state_hold(x); | 876 | xfrm_state_hold(x); |
887 | else | 877 | else |
888 | *err = acquire_in_progress ? -EAGAIN : error; | 878 | *err = acquire_in_progress ? -EAGAIN : error; |
889 | spin_unlock_bh(&xfrm_state_lock); | 879 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
890 | if (to_put) | 880 | if (to_put) |
891 | xfrm_state_put(to_put); | 881 | xfrm_state_put(to_put); |
892 | return x; | 882 | return x; |
@@ -900,7 +890,7 @@ xfrm_stateonly_find(struct net *net, u32 mark, | |||
900 | unsigned int h; | 890 | unsigned int h; |
901 | struct xfrm_state *rx = NULL, *x = NULL; | 891 | struct xfrm_state *rx = NULL, *x = NULL; |
902 | 892 | ||
903 | spin_lock(&xfrm_state_lock); | 893 | spin_lock(&net->xfrm.xfrm_state_lock); |
904 | h = xfrm_dst_hash(net, daddr, saddr, reqid, family); | 894 | h = xfrm_dst_hash(net, daddr, saddr, reqid, family); |
905 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { | 895 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
906 | if (x->props.family == family && | 896 | if (x->props.family == family && |
@@ -918,7 +908,7 @@ xfrm_stateonly_find(struct net *net, u32 mark, | |||
918 | 908 | ||
919 | if (rx) | 909 | if (rx) |
920 | xfrm_state_hold(rx); | 910 | xfrm_state_hold(rx); |
921 | spin_unlock(&xfrm_state_lock); | 911 | spin_unlock(&net->xfrm.xfrm_state_lock); |
922 | 912 | ||
923 | 913 | ||
924 | return rx; | 914 | return rx; |
@@ -950,14 +940,12 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
950 | if (x->replay_maxage) | 940 | if (x->replay_maxage) |
951 | mod_timer(&x->rtimer, jiffies + x->replay_maxage); | 941 | mod_timer(&x->rtimer, jiffies + x->replay_maxage); |
952 | 942 | ||
953 | wake_up(&net->xfrm.km_waitq); | ||
954 | |||
955 | net->xfrm.state_num++; | 943 | net->xfrm.state_num++; |
956 | 944 | ||
957 | xfrm_hash_grow_check(net, x->bydst.next != NULL); | 945 | xfrm_hash_grow_check(net, x->bydst.next != NULL); |
958 | } | 946 | } |
959 | 947 | ||
960 | /* xfrm_state_lock is held */ | 948 | /* net->xfrm.xfrm_state_lock is held */ |
961 | static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | 949 | static void __xfrm_state_bump_genids(struct xfrm_state *xnew) |
962 | { | 950 | { |
963 | struct net *net = xs_net(xnew); | 951 | struct net *net = xs_net(xnew); |
@@ -980,14 +968,16 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | |||
980 | 968 | ||
981 | void xfrm_state_insert(struct xfrm_state *x) | 969 | void xfrm_state_insert(struct xfrm_state *x) |
982 | { | 970 | { |
983 | spin_lock_bh(&xfrm_state_lock); | 971 | struct net *net = xs_net(x); |
972 | |||
973 | spin_lock_bh(&net->xfrm.xfrm_state_lock); | ||
984 | __xfrm_state_bump_genids(x); | 974 | __xfrm_state_bump_genids(x); |
985 | __xfrm_state_insert(x); | 975 | __xfrm_state_insert(x); |
986 | spin_unlock_bh(&xfrm_state_lock); | 976 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
987 | } | 977 | } |
988 | EXPORT_SYMBOL(xfrm_state_insert); | 978 | EXPORT_SYMBOL(xfrm_state_insert); |
989 | 979 | ||
990 | /* xfrm_state_lock is held */ | 980 | /* net->xfrm.xfrm_state_lock is held */ |
991 | static struct xfrm_state *__find_acq_core(struct net *net, | 981 | static struct xfrm_state *__find_acq_core(struct net *net, |
992 | const struct xfrm_mark *m, | 982 | const struct xfrm_mark *m, |
993 | unsigned short family, u8 mode, | 983 | unsigned short family, u8 mode, |
@@ -1079,7 +1069,7 @@ int xfrm_state_add(struct xfrm_state *x) | |||
1079 | 1069 | ||
1080 | to_put = NULL; | 1070 | to_put = NULL; |
1081 | 1071 | ||
1082 | spin_lock_bh(&xfrm_state_lock); | 1072 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1083 | 1073 | ||
1084 | x1 = __xfrm_state_locate(x, use_spi, family); | 1074 | x1 = __xfrm_state_locate(x, use_spi, family); |
1085 | if (x1) { | 1075 | if (x1) { |
@@ -1108,7 +1098,7 @@ int xfrm_state_add(struct xfrm_state *x) | |||
1108 | err = 0; | 1098 | err = 0; |
1109 | 1099 | ||
1110 | out: | 1100 | out: |
1111 | spin_unlock_bh(&xfrm_state_lock); | 1101 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1112 | 1102 | ||
1113 | if (x1) { | 1103 | if (x1) { |
1114 | xfrm_state_delete(x1); | 1104 | xfrm_state_delete(x1); |
@@ -1203,16 +1193,16 @@ out: | |||
1203 | return NULL; | 1193 | return NULL; |
1204 | } | 1194 | } |
1205 | 1195 | ||
1206 | /* xfrm_state_lock is held */ | 1196 | /* net->xfrm.xfrm_state_lock is held */ |
1207 | struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) | 1197 | struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) |
1208 | { | 1198 | { |
1209 | unsigned int h; | 1199 | unsigned int h; |
1210 | struct xfrm_state *x; | 1200 | struct xfrm_state *x; |
1211 | 1201 | ||
1212 | if (m->reqid) { | 1202 | if (m->reqid) { |
1213 | h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, | 1203 | h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, |
1214 | m->reqid, m->old_family); | 1204 | m->reqid, m->old_family); |
1215 | hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) { | 1205 | hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { |
1216 | if (x->props.mode != m->mode || | 1206 | if (x->props.mode != m->mode || |
1217 | x->id.proto != m->proto) | 1207 | x->id.proto != m->proto) |
1218 | continue; | 1208 | continue; |
@@ -1227,9 +1217,9 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) | |||
1227 | return x; | 1217 | return x; |
1228 | } | 1218 | } |
1229 | } else { | 1219 | } else { |
1230 | h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, | 1220 | h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, |
1231 | m->old_family); | 1221 | m->old_family); |
1232 | hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) { | 1222 | hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) { |
1233 | if (x->props.mode != m->mode || | 1223 | if (x->props.mode != m->mode || |
1234 | x->id.proto != m->proto) | 1224 | x->id.proto != m->proto) |
1235 | continue; | 1225 | continue; |
@@ -1283,10 +1273,11 @@ int xfrm_state_update(struct xfrm_state *x) | |||
1283 | struct xfrm_state *x1, *to_put; | 1273 | struct xfrm_state *x1, *to_put; |
1284 | int err; | 1274 | int err; |
1285 | int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); | 1275 | int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); |
1276 | struct net *net = xs_net(x); | ||
1286 | 1277 | ||
1287 | to_put = NULL; | 1278 | to_put = NULL; |
1288 | 1279 | ||
1289 | spin_lock_bh(&xfrm_state_lock); | 1280 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1290 | x1 = __xfrm_state_locate(x, use_spi, x->props.family); | 1281 | x1 = __xfrm_state_locate(x, use_spi, x->props.family); |
1291 | 1282 | ||
1292 | err = -ESRCH; | 1283 | err = -ESRCH; |
@@ -1306,7 +1297,7 @@ int xfrm_state_update(struct xfrm_state *x) | |||
1306 | err = 0; | 1297 | err = 0; |
1307 | 1298 | ||
1308 | out: | 1299 | out: |
1309 | spin_unlock_bh(&xfrm_state_lock); | 1300 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1310 | 1301 | ||
1311 | if (to_put) | 1302 | if (to_put) |
1312 | xfrm_state_put(to_put); | 1303 | xfrm_state_put(to_put); |
@@ -1377,9 +1368,9 @@ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 | |||
1377 | { | 1368 | { |
1378 | struct xfrm_state *x; | 1369 | struct xfrm_state *x; |
1379 | 1370 | ||
1380 | spin_lock_bh(&xfrm_state_lock); | 1371 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1381 | x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family); | 1372 | x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family); |
1382 | spin_unlock_bh(&xfrm_state_lock); | 1373 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1383 | return x; | 1374 | return x; |
1384 | } | 1375 | } |
1385 | EXPORT_SYMBOL(xfrm_state_lookup); | 1376 | EXPORT_SYMBOL(xfrm_state_lookup); |
@@ -1391,9 +1382,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark, | |||
1391 | { | 1382 | { |
1392 | struct xfrm_state *x; | 1383 | struct xfrm_state *x; |
1393 | 1384 | ||
1394 | spin_lock_bh(&xfrm_state_lock); | 1385 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1395 | x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family); | 1386 | x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family); |
1396 | spin_unlock_bh(&xfrm_state_lock); | 1387 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1397 | return x; | 1388 | return x; |
1398 | } | 1389 | } |
1399 | EXPORT_SYMBOL(xfrm_state_lookup_byaddr); | 1390 | EXPORT_SYMBOL(xfrm_state_lookup_byaddr); |
@@ -1405,9 +1396,9 @@ xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, | |||
1405 | { | 1396 | { |
1406 | struct xfrm_state *x; | 1397 | struct xfrm_state *x; |
1407 | 1398 | ||
1408 | spin_lock_bh(&xfrm_state_lock); | 1399 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1409 | x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create); | 1400 | x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create); |
1410 | spin_unlock_bh(&xfrm_state_lock); | 1401 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1411 | 1402 | ||
1412 | return x; | 1403 | return x; |
1413 | } | 1404 | } |
@@ -1416,17 +1407,17 @@ EXPORT_SYMBOL(xfrm_find_acq); | |||
1416 | #ifdef CONFIG_XFRM_SUB_POLICY | 1407 | #ifdef CONFIG_XFRM_SUB_POLICY |
1417 | int | 1408 | int |
1418 | xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, | 1409 | xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, |
1419 | unsigned short family) | 1410 | unsigned short family, struct net *net) |
1420 | { | 1411 | { |
1421 | int err = 0; | 1412 | int err = 0; |
1422 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 1413 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
1423 | if (!afinfo) | 1414 | if (!afinfo) |
1424 | return -EAFNOSUPPORT; | 1415 | return -EAFNOSUPPORT; |
1425 | 1416 | ||
1426 | spin_lock_bh(&xfrm_state_lock); | 1417 | spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/ |
1427 | if (afinfo->tmpl_sort) | 1418 | if (afinfo->tmpl_sort) |
1428 | err = afinfo->tmpl_sort(dst, src, n); | 1419 | err = afinfo->tmpl_sort(dst, src, n); |
1429 | spin_unlock_bh(&xfrm_state_lock); | 1420 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1430 | xfrm_state_put_afinfo(afinfo); | 1421 | xfrm_state_put_afinfo(afinfo); |
1431 | return err; | 1422 | return err; |
1432 | } | 1423 | } |
@@ -1438,13 +1429,15 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, | |||
1438 | { | 1429 | { |
1439 | int err = 0; | 1430 | int err = 0; |
1440 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); | 1431 | struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); |
1432 | struct net *net = xs_net(*dst); | ||
1433 | |||
1441 | if (!afinfo) | 1434 | if (!afinfo) |
1442 | return -EAFNOSUPPORT; | 1435 | return -EAFNOSUPPORT; |
1443 | 1436 | ||
1444 | spin_lock_bh(&xfrm_state_lock); | 1437 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1445 | if (afinfo->state_sort) | 1438 | if (afinfo->state_sort) |
1446 | err = afinfo->state_sort(dst, src, n); | 1439 | err = afinfo->state_sort(dst, src, n); |
1447 | spin_unlock_bh(&xfrm_state_lock); | 1440 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1448 | xfrm_state_put_afinfo(afinfo); | 1441 | xfrm_state_put_afinfo(afinfo); |
1449 | return err; | 1442 | return err; |
1450 | } | 1443 | } |
@@ -1476,9 +1469,9 @@ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq) | |||
1476 | { | 1469 | { |
1477 | struct xfrm_state *x; | 1470 | struct xfrm_state *x; |
1478 | 1471 | ||
1479 | spin_lock_bh(&xfrm_state_lock); | 1472 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1480 | x = __xfrm_find_acq_byseq(net, mark, seq); | 1473 | x = __xfrm_find_acq_byseq(net, mark, seq); |
1481 | spin_unlock_bh(&xfrm_state_lock); | 1474 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1482 | return x; | 1475 | return x; |
1483 | } | 1476 | } |
1484 | EXPORT_SYMBOL(xfrm_find_acq_byseq); | 1477 | EXPORT_SYMBOL(xfrm_find_acq_byseq); |
@@ -1496,6 +1489,30 @@ u32 xfrm_get_acqseq(void) | |||
1496 | } | 1489 | } |
1497 | EXPORT_SYMBOL(xfrm_get_acqseq); | 1490 | EXPORT_SYMBOL(xfrm_get_acqseq); |
1498 | 1491 | ||
1492 | int verify_spi_info(u8 proto, u32 min, u32 max) | ||
1493 | { | ||
1494 | switch (proto) { | ||
1495 | case IPPROTO_AH: | ||
1496 | case IPPROTO_ESP: | ||
1497 | break; | ||
1498 | |||
1499 | case IPPROTO_COMP: | ||
1500 | /* IPCOMP spi is 16-bits. */ | ||
1501 | if (max >= 0x10000) | ||
1502 | return -EINVAL; | ||
1503 | break; | ||
1504 | |||
1505 | default: | ||
1506 | return -EINVAL; | ||
1507 | } | ||
1508 | |||
1509 | if (min > max) | ||
1510 | return -EINVAL; | ||
1511 | |||
1512 | return 0; | ||
1513 | } | ||
1514 | EXPORT_SYMBOL(verify_spi_info); | ||
1515 | |||
1499 | int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) | 1516 | int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) |
1500 | { | 1517 | { |
1501 | struct net *net = xs_net(x); | 1518 | struct net *net = xs_net(x); |
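The verify_spi_info() helper added above is self-contained, so its accepted and rejected ranges are easy to exercise. The following user-space restatement of the same rules (standard IPPROTO_* values; the return codes and main() are illustrative) shows a few representative calls:

/* Re-statement of the verify_spi_info() rules shown above, for
 * illustration only; -1 stands in for -EINVAL. */
#include <netinet/in.h>   /* IPPROTO_AH, IPPROTO_ESP, IPPROTO_COMP */
#include <stdint.h>
#include <stdio.h>

static int verify_spi_info(uint8_t proto, uint32_t min, uint32_t max)
{
        switch (proto) {
        case IPPROTO_AH:
        case IPPROTO_ESP:
                break;
        case IPPROTO_COMP:
                /* IPComp CPIs are only 16 bits wide. */
                if (max >= 0x10000)
                        return -1;
                break;
        default:
                return -1;
        }
        return min > max ? -1 : 0;
}

int main(void)
{
        /* ESP may use the full 32-bit SPI space. */
        printf("%d\n", verify_spi_info(IPPROTO_ESP, 0x100, 0xffffffffu)); /*  0 */
        /* IPComp ranges reaching 0x10000 are rejected. */
        printf("%d\n", verify_spi_info(IPPROTO_COMP, 0x100, 0x10000));    /* -1 */
        /* Inverted ranges are rejected regardless of protocol. */
        printf("%d\n", verify_spi_info(IPPROTO_AH, 0x200, 0x100));        /* -1 */
        return 0;
}

Exporting the helper lets both the netlink and pfkey front ends apply the identical range checks, which is what the later xfrm_user.c hunks switch to.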
@@ -1536,10 +1553,10 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) | |||
1536 | } | 1553 | } |
1537 | } | 1554 | } |
1538 | if (x->id.spi) { | 1555 | if (x->id.spi) { |
1539 | spin_lock_bh(&xfrm_state_lock); | 1556 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1540 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); | 1557 | h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); |
1541 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); | 1558 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); |
1542 | spin_unlock_bh(&xfrm_state_lock); | 1559 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1543 | 1560 | ||
1544 | err = 0; | 1561 | err = 0; |
1545 | } | 1562 | } |
@@ -1562,7 +1579,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, | |||
1562 | if (walk->seq != 0 && list_empty(&walk->all)) | 1579 | if (walk->seq != 0 && list_empty(&walk->all)) |
1563 | return 0; | 1580 | return 0; |
1564 | 1581 | ||
1565 | spin_lock_bh(&xfrm_state_lock); | 1582 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1566 | if (list_empty(&walk->all)) | 1583 | if (list_empty(&walk->all)) |
1567 | x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); | 1584 | x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); |
1568 | else | 1585 | else |
@@ -1586,7 +1603,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, | |||
1586 | } | 1603 | } |
1587 | list_del_init(&walk->all); | 1604 | list_del_init(&walk->all); |
1588 | out: | 1605 | out: |
1589 | spin_unlock_bh(&xfrm_state_lock); | 1606 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1590 | return err; | 1607 | return err; |
1591 | } | 1608 | } |
1592 | EXPORT_SYMBOL(xfrm_state_walk); | 1609 | EXPORT_SYMBOL(xfrm_state_walk); |
@@ -1600,14 +1617,14 @@ void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) | |||
1600 | } | 1617 | } |
1601 | EXPORT_SYMBOL(xfrm_state_walk_init); | 1618 | EXPORT_SYMBOL(xfrm_state_walk_init); |
1602 | 1619 | ||
1603 | void xfrm_state_walk_done(struct xfrm_state_walk *walk) | 1620 | void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net) |
1604 | { | 1621 | { |
1605 | if (list_empty(&walk->all)) | 1622 | if (list_empty(&walk->all)) |
1606 | return; | 1623 | return; |
1607 | 1624 | ||
1608 | spin_lock_bh(&xfrm_state_lock); | 1625 | spin_lock_bh(&net->xfrm.xfrm_state_lock); |
1609 | list_del(&walk->all); | 1626 | list_del(&walk->all); |
1610 | spin_unlock_bh(&xfrm_state_lock); | 1627 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
1611 | } | 1628 | } |
1612 | EXPORT_SYMBOL(xfrm_state_walk_done); | 1629 | EXPORT_SYMBOL(xfrm_state_walk_done); |
1613 | 1630 | ||
@@ -1655,16 +1672,12 @@ EXPORT_SYMBOL(km_state_notify); | |||
1655 | 1672 | ||
1656 | void km_state_expired(struct xfrm_state *x, int hard, u32 portid) | 1673 | void km_state_expired(struct xfrm_state *x, int hard, u32 portid) |
1657 | { | 1674 | { |
1658 | struct net *net = xs_net(x); | ||
1659 | struct km_event c; | 1675 | struct km_event c; |
1660 | 1676 | ||
1661 | c.data.hard = hard; | 1677 | c.data.hard = hard; |
1662 | c.portid = portid; | 1678 | c.portid = portid; |
1663 | c.event = XFRM_MSG_EXPIRE; | 1679 | c.event = XFRM_MSG_EXPIRE; |
1664 | km_state_notify(x, &c); | 1680 | km_state_notify(x, &c); |
1665 | |||
1666 | if (hard) | ||
1667 | wake_up(&net->xfrm.km_waitq); | ||
1668 | } | 1681 | } |
1669 | 1682 | ||
1670 | EXPORT_SYMBOL(km_state_expired); | 1683 | EXPORT_SYMBOL(km_state_expired); |
@@ -1707,16 +1720,12 @@ EXPORT_SYMBOL(km_new_mapping); | |||
1707 | 1720 | ||
1708 | void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid) | 1721 | void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid) |
1709 | { | 1722 | { |
1710 | struct net *net = xp_net(pol); | ||
1711 | struct km_event c; | 1723 | struct km_event c; |
1712 | 1724 | ||
1713 | c.data.hard = hard; | 1725 | c.data.hard = hard; |
1714 | c.portid = portid; | 1726 | c.portid = portid; |
1715 | c.event = XFRM_MSG_POLEXPIRE; | 1727 | c.event = XFRM_MSG_POLEXPIRE; |
1716 | km_policy_notify(pol, dir, &c); | 1728 | km_policy_notify(pol, dir, &c); |
1717 | |||
1718 | if (hard) | ||
1719 | wake_up(&net->xfrm.km_waitq); | ||
1720 | } | 1729 | } |
1721 | EXPORT_SYMBOL(km_policy_expired); | 1730 | EXPORT_SYMBOL(km_policy_expired); |
1722 | 1731 | ||
@@ -2025,7 +2034,7 @@ int __net_init xfrm_state_init(struct net *net) | |||
2025 | INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); | 2034 | INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); |
2026 | INIT_HLIST_HEAD(&net->xfrm.state_gc_list); | 2035 | INIT_HLIST_HEAD(&net->xfrm.state_gc_list); |
2027 | INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task); | 2036 | INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task); |
2028 | init_waitqueue_head(&net->xfrm.km_waitq); | 2037 | spin_lock_init(&net->xfrm.xfrm_state_lock); |
2029 | return 0; | 2038 | return 0; |
2030 | 2039 | ||
2031 | out_byspi: | 2040 | out_byspi: |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index f964d4c00ffb..97681a390402 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -181,7 +181,9 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, | |||
181 | attrs[XFRMA_ALG_AEAD] || | 181 | attrs[XFRMA_ALG_AEAD] || |
182 | attrs[XFRMA_ALG_CRYPT] || | 182 | attrs[XFRMA_ALG_CRYPT] || |
183 | attrs[XFRMA_ALG_COMP] || | 183 | attrs[XFRMA_ALG_COMP] || |
184 | attrs[XFRMA_TFCPAD]) | 184 | attrs[XFRMA_TFCPAD] || |
185 | (ntohl(p->id.spi) >= 0x10000)) | ||
186 | |||
185 | goto out; | 187 | goto out; |
186 | break; | 188 | break; |
187 | 189 | ||
@@ -877,7 +879,10 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) | |||
877 | static int xfrm_dump_sa_done(struct netlink_callback *cb) | 879 | static int xfrm_dump_sa_done(struct netlink_callback *cb) |
878 | { | 880 | { |
879 | struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; | 881 | struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; |
880 | xfrm_state_walk_done(walk); | 882 | struct sock *sk = cb->skb->sk; |
883 | struct net *net = sock_net(sk); | ||
884 | |||
885 | xfrm_state_walk_done(walk, net); | ||
881 | return 0; | 886 | return 0; |
882 | } | 887 | } |
883 | 888 | ||
@@ -1074,29 +1079,6 @@ out_noput: | |||
1074 | return err; | 1079 | return err; |
1075 | } | 1080 | } |
1076 | 1081 | ||
1077 | static int verify_userspi_info(struct xfrm_userspi_info *p) | ||
1078 | { | ||
1079 | switch (p->info.id.proto) { | ||
1080 | case IPPROTO_AH: | ||
1081 | case IPPROTO_ESP: | ||
1082 | break; | ||
1083 | |||
1084 | case IPPROTO_COMP: | ||
1085 | /* IPCOMP spi is 16-bits. */ | ||
1086 | if (p->max >= 0x10000) | ||
1087 | return -EINVAL; | ||
1088 | break; | ||
1089 | |||
1090 | default: | ||
1091 | return -EINVAL; | ||
1092 | } | ||
1093 | |||
1094 | if (p->min > p->max) | ||
1095 | return -EINVAL; | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, | 1082 | static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, |
1101 | struct nlattr **attrs) | 1083 | struct nlattr **attrs) |
1102 | { | 1084 | { |
@@ -1111,7 +1093,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1111 | struct xfrm_mark m; | 1093 | struct xfrm_mark m; |
1112 | 1094 | ||
1113 | p = nlmsg_data(nlh); | 1095 | p = nlmsg_data(nlh); |
1114 | err = verify_userspi_info(p); | 1096 | err = verify_spi_info(p->info.id.proto, p->min, p->max); |
1115 | if (err) | 1097 | if (err) |
1116 | goto out_noput; | 1098 | goto out_noput; |
1117 | 1099 | ||
@@ -1189,6 +1171,8 @@ static int verify_policy_type(u8 type) | |||
1189 | 1171 | ||
1190 | static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) | 1172 | static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) |
1191 | { | 1173 | { |
1174 | int ret; | ||
1175 | |||
1192 | switch (p->share) { | 1176 | switch (p->share) { |
1193 | case XFRM_SHARE_ANY: | 1177 | case XFRM_SHARE_ANY: |
1194 | case XFRM_SHARE_SESSION: | 1178 | case XFRM_SHARE_SESSION: |
@@ -1224,7 +1208,13 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) | |||
1224 | return -EINVAL; | 1208 | return -EINVAL; |
1225 | } | 1209 | } |
1226 | 1210 | ||
1227 | return verify_policy_dir(p->dir); | 1211 | ret = verify_policy_dir(p->dir); |
1212 | if (ret) | ||
1213 | return ret; | ||
1214 | if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir)) | ||
1215 | return -EINVAL; | ||
1216 | |||
1217 | return 0; | ||
1228 | } | 1218 | } |
1229 | 1219 | ||
1230 | static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) | 1220 | static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) |
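The added check ties a user-supplied policy index to the policy direction: a non-zero index must carry the direction in its low bits, matching the layout used for generated indices. A sketch of just that rule, with the uapi constants restated locally for illustration (XFRM_POLICY_IN/OUT/FWD = 0/1/2, XFRM_POLICY_MAX = 3), is:

/* Index/direction consistency rule from the hunk above, restated in
 * user space for illustration. */
#include <stdint.h>
#include <stdio.h>

#define XFRM_POLICY_MAX 3

static int index_matches_dir(uint32_t index, uint8_t dir)
{
        if (index && ((index & XFRM_POLICY_MAX) != dir))
                return 0;        /* would be rejected with -EINVAL */
        return 1;
}

int main(void)
{
        printf("%d\n", index_matches_dir(0x00, 1)); /* 1: index 0 means "allocate one" */
        printf("%d\n", index_matches_dir(0x11, 1)); /* 1: low bits match OUT           */
        printf("%d\n", index_matches_dir(0x11, 0)); /* 0: direction mismatch           */
        return 0;
}

This is what allows the user-supplied index (item 1 of the series) to be accepted safely instead of always being replaced by a generated one.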
@@ -1547,8 +1537,9 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr | |||
1547 | static int xfrm_dump_policy_done(struct netlink_callback *cb) | 1537 | static int xfrm_dump_policy_done(struct netlink_callback *cb) |
1548 | { | 1538 | { |
1549 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; | 1539 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; |
1540 | struct net *net = sock_net(cb->skb->sk); | ||
1550 | 1541 | ||
1551 | xfrm_policy_walk_done(walk); | 1542 | xfrm_policy_walk_done(walk, net); |
1552 | return 0; | 1543 | return 0; |
1553 | } | 1544 | } |
1554 | 1545 | ||
@@ -2129,6 +2120,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2129 | u8 type; | 2120 | u8 type; |
2130 | int err; | 2121 | int err; |
2131 | int n = 0; | 2122 | int n = 0; |
2123 | struct net *net = sock_net(skb->sk); | ||
2132 | 2124 | ||
2133 | if (attrs[XFRMA_MIGRATE] == NULL) | 2125 | if (attrs[XFRMA_MIGRATE] == NULL) |
2134 | return -EINVAL; | 2126 | return -EINVAL; |
@@ -2146,7 +2138,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2146 | if (!n) | 2138 | if (!n) |
2147 | return 0; | 2139 | return 0; |
2148 | 2140 | ||
2149 | xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp); | 2141 | xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net); |
2150 | 2142 | ||
2151 | return 0; | 2143 | return 0; |
2152 | } | 2144 | } |
@@ -2394,9 +2386,11 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2394 | 2386 | ||
2395 | static void xfrm_netlink_rcv(struct sk_buff *skb) | 2387 | static void xfrm_netlink_rcv(struct sk_buff *skb) |
2396 | { | 2388 | { |
2397 | mutex_lock(&xfrm_cfg_mutex); | 2389 | struct net *net = sock_net(skb->sk); |
2390 | |||
2391 | mutex_lock(&net->xfrm.xfrm_cfg_mutex); | ||
2398 | netlink_rcv_skb(skb, &xfrm_user_rcv_msg); | 2392 | netlink_rcv_skb(skb, &xfrm_user_rcv_msg); |
2399 | mutex_unlock(&xfrm_cfg_mutex); | 2393 | mutex_unlock(&net->xfrm.xfrm_cfg_mutex); |
2400 | } | 2394 | } |
2401 | 2395 | ||
2402 | static inline size_t xfrm_expire_msgsize(void) | 2396 | static inline size_t xfrm_expire_msgsize(void) |
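The final hunk derives the namespace from the receiving socket and serializes configuration under that namespace's cfg mutex rather than a single global one. A rough user-space analogue of that flow (all names illustrative) is:

/* Configuration requests are serialized per namespace, with the
 * namespace looked up from the socket the request arrived on. */
#include <pthread.h>
#include <stdio.h>

struct ns {
        pthread_mutex_t cfg_mutex;   /* analogue of net->xfrm.xfrm_cfg_mutex */
        int cfg_changes;
};

struct conn {
        struct ns *net;              /* analogue of sock_net(sk)             */
};

static void netlink_rcv(struct conn *sk)
{
        struct ns *net = sk->net;    /* derive the namespace from the socket */

        pthread_mutex_lock(&net->cfg_mutex);
        net->cfg_changes++;          /* stand-in for netlink_rcv_skb(...)    */
        pthread_mutex_unlock(&net->cfg_mutex);
}

int main(void)
{
        struct ns a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct ns b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct conn sa = { &a }, sb = { &b };

        netlink_rcv(&sa);            /* contends only with other users of a  */
        netlink_rcv(&sb);
        printf("a=%d b=%d\n", a.cfg_changes, b.cfg_changes);
        return 0;
}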