author		Sridhar Samudrala <sri@us.ibm.com>	2006-01-17 14:56:26 -0500
committer	Sridhar Samudrala <sri@us.ibm.com>	2006-01-17 14:56:26 -0500
commit		c4d2444e992c4eda1d7fc3287e93ba58295bf6b9 (patch)
tree		04f2096c141ede308356bd2d8277d4c291fae24d /net/sctp/input.c
parent		313e7b4d2588539e388d31c1febd50503a0083fc (diff)
[SCTP]: Fix couple of races between sctp_peeloff() and sctp_rcv().
Validate and update the sk in sctp_rcv() to avoid the race where an assoc/ep could move to a different socket after we get the sk, but before the skb is added to the backlog. Also migrate the skbs in the backlog queue to the new sk when doing a peeloff.

Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
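
The peeloff side of the fix lands in net/sctp/socket.c, which is outside this diffstat. As a rough sketch of how sctp_sock_migrate() would be expected to hand the queued skbs over via the new helper (illustrative only; the exact hunk is not shown on this page):

	/* Sketch, not the verbatim socket.c change. The old socket's
	 * bottom-half lock must be held so a concurrent sctp_rcv()
	 * cannot append to oldsk->sk_backlog while it is drained.
	 */
	sctp_bh_lock_sock(oldsk);
	sctp_backlog_migrate(assoc, oldsk, newsk);
	sctp_bh_unlock_sock(oldsk);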
Diffstat (limited to 'net/sctp/input.c')
-rw-r--r--  net/sctp/input.c  |  35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c463e4049c52..71fd56375641 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,12 +257,21 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
+	/* It is possible that the association could have moved to a different
+	 * socket if it is peeled off. If so, update the sk.
+	 */
+	if (sk != rcvr->sk) {
+		sctp_bh_lock_sock(rcvr->sk);
+		sctp_bh_unlock_sock(sk);
+		sk = rcvr->sk;
+	}
+
 	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, skb);
 	else
 		sctp_backlog_rcv(sk, skb);
 
 	/* Release the sock and the sock ref we took in the lookup calls.
 	 * The asoc/ep ref will be released in sctp_backlog_rcv.
 	 */
 	sctp_bh_unlock_sock(sk);
@@ -297,6 +306,9 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = NULL;
 
 	rcvr = chunk->rcvr;
+
+	BUG_TRAP(rcvr->sk == sk);
+
 	if (rcvr->dead) {
 		sctp_chunk_free(chunk);
 	} else {
@@ -313,6 +325,27 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk)
+{
+	struct sk_buff *skb;
+	struct sctp_chunk *chunk;
+
+	skb = oldsk->sk_backlog.head;
+	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+	while (skb != NULL) {
+		struct sk_buff *next = skb->next;
+
+		chunk = SCTP_INPUT_CB(skb)->chunk;
+		skb->next = NULL;
+		if (&assoc->base == chunk->rcvr)
+			sk_add_backlog(newsk, skb);
+		else
+			sk_add_backlog(oldsk, skb);
+		skb = next;
+	}
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 			   struct sctp_transport *t, __u32 pmtu)
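
For context on the new helper above: in kernels of this vintage, sk_backlog is a singly linked list threaded through skb->next with head/tail pointers, maintained by sk_add_backlog() while the socket lock is held. A simplified inline-function rendering of the 2.6-era macro from include/net/sock.h (equivalent logic, not verbatim):

	/* Append skb to sk's backlog; caller holds the socket spinlock. */
	static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
	{
		if (!sk->sk_backlog.tail)
			sk->sk_backlog.head = skb;		/* queue was empty */
		else
			sk->sk_backlog.tail->next = skb;	/* append at tail */
		sk->sk_backlog.tail = skb;
		skb->next = NULL;				/* skb is the new tail */
	}

This is why sctp_backlog_migrate() saves skb->next before requeueing each skb (sk_add_backlog() rewrites it) and detaches oldsk's head/tail up front, so skbs that stay behind are appended to a fresh, empty list rather than spliced into the one being walked.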