author     Sridhar Samudrala <sri@us.ibm.com>   2006-01-17 14:56:26 -0500
committer  Sridhar Samudrala <sri@us.ibm.com>   2006-01-17 14:56:26 -0500
commit     c4d2444e992c4eda1d7fc3287e93ba58295bf6b9 (patch)
tree       04f2096c141ede308356bd2d8277d4c291fae24d
parent     313e7b4d2588539e388d31c1febd50503a0083fc (diff)
[SCTP]: Fix couple of races between sctp_peeloff() and sctp_rcv().
Validate and update the sk in sctp_rcv() to avoid the race where an
assoc/ep could move to a different socket after we get the sk, but before
the skb is added to the backlog.
Also migrate the skbs in the backlog queue to the new sk when doing a peeloff.
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
-rw-r--r--   include/net/sctp/sctp.h  |  2
-rw-r--r--   net/sctp/input.c         | 35
-rw-r--r--   net/sctp/socket.c        |  4
3 files changed, 40 insertions, 1 deletions
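For orientation, here is a rough sketch of the pre-patch receive path that the commit message describes. It is illustrative only: the helper name sctp_rcv_sketch and its reduced parameter list are invented for this note, and the real sctp_rcv() performs the association/endpoint lookup and reference counting before reaching this point.

/* Illustrative only -- a stripped-down view of the pre-patch tail of
 * sctp_rcv().  Between reading rcvr->sk and queueing the skb, a concurrent
 * sctp_peeloff() can migrate the association to a new socket, so the skb
 * can land on the old socket's backlog.
 */
static int sctp_rcv_sketch(struct sk_buff *skb, struct sctp_ep_common *rcvr)
{
        struct sock *sk = rcvr->sk;     /* snapshot of the owning socket */

        /* <-- race window: rcvr->sk may already point at the peeled-off
         *     socket by the time the skb is queued below */

        sctp_bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                sk_add_backlog(sk, skb);        /* may queue on the stale sk */
        else
                sctp_backlog_rcv(sk, skb);
        sctp_bh_unlock_sock(sk);

        return 0;
}

The patch closes both ends of this window: sctp_rcv() re-reads rcvr->sk after locking and swaps locks to the current owner, and sctp_sock_migrate() moves any skbs already sitting on the old backlog over to the new socket.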
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a553f39f6aee..e673b2c984e9 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 void sctp_icmp_proto_unreachable(struct sock *sk,
                                  struct sctp_association *asoc,
                                  struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+                          struct sock *oldsk, struct sock *newsk);
 
 /*
  * Section: Macros, externs, and inlines
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c463e4049c52..71fd56375641 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,12 +257,21 @@ int sctp_rcv(struct sk_buff *skb)
          */
         sctp_bh_lock_sock(sk);
 
+        /* It is possible that the association could have moved to a different
+         * socket if it is peeled off. If so, update the sk.
+         */
+        if (sk != rcvr->sk) {
+                sctp_bh_lock_sock(rcvr->sk);
+                sctp_bh_unlock_sock(sk);
+                sk = rcvr->sk;
+        }
+
         if (sock_owned_by_user(sk))
                 sk_add_backlog(sk, skb);
         else
                 sctp_backlog_rcv(sk, skb);
 
         /* Release the sock and the sock ref we took in the lookup calls.
          * The asoc/ep ref will be released in sctp_backlog_rcv.
          */
         sctp_bh_unlock_sock(sk);
@@ -297,6 +306,9 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
         struct sctp_ep_common *rcvr = NULL;
 
         rcvr = chunk->rcvr;
+
+        BUG_TRAP(rcvr->sk == sk);
+
         if (rcvr->dead) {
                 sctp_chunk_free(chunk);
         } else {
@@ -313,6 +325,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
         return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+                          struct sock *oldsk, struct sock *newsk)
+{
+        struct sk_buff *skb;
+        struct sctp_chunk *chunk;
+
+        skb = oldsk->sk_backlog.head;
+        oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+        while (skb != NULL) {
+                struct sk_buff *next = skb->next;
+
+                chunk = SCTP_INPUT_CB(skb)->chunk;
+                skb->next = NULL;
+                if (&assoc->base == chunk->rcvr)
+                        sk_add_backlog(newsk, skb);
+                else
+                        sk_add_backlog(oldsk, skb);
+                skb = next;
+        }
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                            struct sctp_transport *t, __u32 pmtu)
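The migrate loop above is easier to read with the backlog's shape in mind: in this kernel generation, sk->sk_backlog is a singly linked list of skbs chained through skb->next with head and tail pointers, which is why the function can detach the whole list and re-append each skb one at a time. The helper below is a hedged sketch of that append behaviour, not the kernel's actual sk_add_backlog() definition.

/* Hedged sketch of how appending to the per-socket backlog behaves here;
 * illustrative only, not the real sk_add_backlog() from include/net/sock.h.
 */
static void backlog_append_sketch(struct sock *sk, struct sk_buff *skb)
{
        skb->next = NULL;
        if (!sk->sk_backlog.tail) {
                /* empty list: skb becomes both head and tail */
                sk->sk_backlog.head = sk->sk_backlog.tail = skb;
        } else {
                /* non-empty list: chain behind the current tail */
                sk->sk_backlog.tail->next = skb;
                sk->sk_backlog.tail = skb;
        }
}

Note that only skbs whose chunk points back at the peeled-off association (&assoc->base == chunk->rcvr) move to newsk; chunks destined for other associations, or for the endpoint itself, are re-queued on oldsk.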
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6a0b1af89932..fb1821d9f338 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
          */
         newsp->type = type;
 
+        spin_lock_bh(&oldsk->sk_lock.slock);
+        /* Migrate the backlog from oldsk to newsk. */
+        sctp_backlog_migrate(assoc, oldsk, newsk);
         /* Migrate the association to the new socket. */
         sctp_assoc_migrate(assoc, newsk);
+        spin_unlock_bh(&oldsk->sk_lock.slock);
 
         /* If the association on the newsk is already closed before accept()
          * is called, set RCV_SHUTDOWN flag.
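Locking note (implied by the hunks rather than stated in the message): assuming the usual bh_lock_sock() implementation of this era, sctp_bh_lock_sock() in the receive path and the new spin_lock_bh(&oldsk->sk_lock.slock) here contend on the same per-socket spinlock. Taking it with bottom halves disabled lets the process-context peeloff run the backlog migration and sctp_assoc_migrate() without sctp_rcv() queuing onto the old socket in the middle, which is the invariant the new BUG_TRAP(rcvr->sk == sk) in sctp_backlog_rcv() asserts.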