author	Vladislav Yasevich <vladsilav.yasevich@hp.com>	2006-05-05 20:03:49 -0400
committer	David S. Miller <davem@davemloft.net>	2006-05-05 20:03:49 -0400
commit	672e7cca17ed6036a1756ed34cf20dbd72d5e5f6 (patch)
tree	d4c5b340e42fb7cca4d1a5282669ffae94227fdc /net/sctp
parent	7c3ceb4fb9667f34f1599a062efecf4cdc4a4ce5 (diff)
[SCTP]: Prevent possible infinite recursion with multiple bundled DATA.
There is a rare situation that causes lksctp to go into infinite recursion
and crash the system. The trigger is a packet that contains at least the
first two DATA fragments of a message bundled together. The recursion is
triggered when the user data buffer is smaller than the full data message.
The problem is that we clone the skb for every fragment in the message.
When reassembling the full message, we try to link skbs from the "first
fragment" clone using the frag_list. However, since the frag_list is shared
between two clones in this rare situation, we end up setting the frag_list
pointer of the second fragment to point to itself. This causes
sctp_skb_pull() to potentially recurse indefinitely.
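
To see why the shared clone state makes the list self-referential, here is a minimal userspace sketch of the mechanism (the toy_* structures and functions are illustrative stand-ins, not the kernel's sk_buff API): two clones share one shared-info block, so pointing the first fragment's frag_list at the second fragment simultaneously makes the second fragment's frag_list point at itself, and a recursive pull over that list never terminates.

#include <stdio.h>

/* Toy model: as with skb_clone(), two "clones" share a single
 * shared-info block, so a frag_list write through one clone is
 * visible through the other. */
struct toy_shared_info { struct toy_skb *frag_list; };

struct toy_skb {
	int len;
	struct toy_shared_info *shinfo;	/* shared between clones */
};

/* Models the recursive pull: consume this buffer, then recurse
 * into frag_list for the remaining bytes.  A self-referential
 * frag_list makes this recurse forever. */
int toy_pull(struct toy_skb *skb, int len)
{
	if (len <= skb->len)
		return len;
	return skb->len + toy_pull(skb->shinfo->frag_list, len - skb->len);
}

int main(void)
{
	struct toy_shared_info shared = { 0 };
	/* Two DATA fragments from one packet: clones sharing shinfo. */
	struct toy_skb first  = { .len = 100, .shinfo = &shared };
	struct toy_skb second = { .len = 100, .shinfo = &shared };

	/* Reassembly links the second fragment off the first one's
	 * frag_list -- but the write goes through the shared shinfo,
	 * so it is also the second fragment's own frag_list. */
	first.shinfo->frag_list = &second;

	printf("self-referential: %d\n", second.shinfo->frag_list == &second);
	/* toy_pull(&first, 150) would now recurse indefinitely. */
	return 0;
}
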
The proposed solution is to make a copy of the skb when attempting to link
fragments using the frag_list.
Signed-off-by: Vladislav Yasevich <vladsilav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp')

-rw-r--r--	net/sctp/ulpqueue.c	27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 2080b2d28c98..575e556aeb3e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -279,6 +279,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
+	struct sk_buff *new = NULL;
 	struct sctp_ulpevent *event;
 	struct sk_buff *pnext, *last;
 	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
@@ -297,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
 	 */
 	if (last)
 		last->next = pos;
-	else
-		skb_shinfo(f_frag)->frag_list = pos;
+	else {
+		if (skb_cloned(f_frag)) {
+			/* This is a cloned skb, we can't just modify
+			 * the frag_list.  We need a new skb to do that.
+			 * Instead of calling skb_unshare(), we'll do it
+			 * ourselves since we need to delay the free.
+			 */
+			new = skb_copy(f_frag, GFP_ATOMIC);
+			if (!new)
+				return NULL;	/* try again later */
+
+			new->sk = f_frag->sk;
+
+			skb_shinfo(new)->frag_list = pos;
+		} else
+			skb_shinfo(f_frag)->frag_list = pos;
+	}
 
 	/* Remove the first fragment from the reassembly queue.  */
 	__skb_unlink(f_frag, queue);
+
+	/* if we did unshare, then free the old skb and re-assign */
+	if (new) {
+		kfree_skb(f_frag);
+		f_frag = new;
+	}
+
 	while (pos) {
 
 		pnext = pos->next;
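
A note on the design choice, following the patch's own comment: skb_unshare() is avoided because it would kfree_skb() the cloned original immediately, while f_frag is still linked on the reassembly queue at that point; the free has to be delayed until after __skb_unlink(). The sketch below isolates just that pattern, with the surrounding reassembly logic stripped away (the function name is illustrative; skb_cloned(), skb_copy(), skb_shinfo(), __skb_unlink(), and kfree_skb() are the real kernel helpers the patch uses).

#include <linux/skbuff.h>

/* Sketch of the delayed-free pattern from the patch above.
 * skb_unshare() would free the cloned original right away, but
 * the original must stay valid until it is off the queue, so the
 * copy and the free are split around __skb_unlink(). */
static struct sk_buff *link_frag_list_safely(struct sk_buff_head *queue,
					     struct sk_buff *f_frag,
					     struct sk_buff *pos)
{
	struct sk_buff *new = NULL;

	if (skb_cloned(f_frag)) {
		/* Copy instead of skb_unshare(): no free yet. */
		new = skb_copy(f_frag, GFP_ATOMIC);
		if (!new)
			return NULL;	/* try again later */
		new->sk = f_frag->sk;
		skb_shinfo(new)->frag_list = pos;
	} else {
		skb_shinfo(f_frag)->frag_list = pos;
	}

	__skb_unlink(f_frag, queue);

	/* Only now is it safe to drop the old clone. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}
	return f_frag;
}
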