author     Martin Townsend <mtownsend1973@gmail.com>  2014-11-06 14:15:13 -0500
committer  Marcel Holtmann <marcel@holtmann.org>      2014-11-06 16:09:48 -0500
commit     56b2c3eea398c772dd895dc62c18cbdd1ba127b1 (patch)
tree       a4d6ab3f6bbf38f4c5b3cb66cc6f77d142397e32 /net/6lowpan
parent     9645c76c7c233da82ff7aced0177c8a131a51e70 (diff)
6lowpan: move skb_free from error paths in decompression
Currently we ensure that the skb is freed on every error path in IPHC
decompression, which makes it easy to introduce skb leaks. Centralising
the skb free into the receive function makes future decompression
routines easier to maintain. It does come at the expense of ensuring
that the skb passed into the decompression routine is not copied.

Signed-off-by: Martin Townsend <mtownsend1973@gmail.com>
Acked-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Acked-by: Alexander Aring <alex.aring@gmail.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
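To make the new contract concrete, here is a minimal caller-side sketch
(hypothetical code, not the actual 802.15.4 or Bluetooth receive
handlers): the decompression routine now only reports failure through
its return value, and the receive path owns the skb and frees it exactly
once. It assumes the ten-argument lowpan_header_decompress() prototype
this file has at the time of the patch; example_rcv and its addressing
parameters are illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/6lowpan.h>

/* Hypothetical receive handler: on any decompression failure the skb
 * is freed exactly once, at the centralised drop label below, never
 * inside lowpan_header_decompress().
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       const u8 *saddr, u8 saddr_type, u8 saddr_len,
		       const u8 *daddr, u8 daddr_type, u8 daddr_len)
{
	u8 iphc0, iphc1;

	/* lowpan_fetch_skb() returns true on failure */
	if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
	    lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
		goto drop;

	/* May fail, but must not free or copy the skb. */
	if (lowpan_header_decompress(skb, dev, saddr, saddr_type,
				     saddr_len, daddr, daddr_type,
				     daddr_len, iphc0, iphc1) < 0)
		goto drop;

	return netif_rx(skb);

drop:
	kfree_skb(skb);	/* the one and only free on the error path */
	return NET_RX_DROP;
}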
Diffstat (limited to 'net/6lowpan')
-rw-r--r--  net/6lowpan/iphc.c | 31 ++++++++++++-------------------
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index cd5f8b8e34cd..aced97db62f0 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -319,7 +319,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	if (iphc1 & LOWPAN_IPHC_CID) {
 		pr_debug("CID flag is set, increase header with one\n");
 		if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
-			goto drop;
+			return -EINVAL;
 	}
 
 	hdr.version = 6;
@@ -331,7 +331,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	 */
 	case 0: /* 00b */
 		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			goto drop;
+			return -EINVAL;
 
 		memcpy(&hdr.flow_lbl, &skb->data[0], 3);
 		skb_pull(skb, 3);
@@ -344,7 +344,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	 */
 	case 2: /* 10b */
 		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			goto drop;
+			return -EINVAL;
 
 		hdr.priority = ((tmp >> 2) & 0x0f);
 		hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
@@ -354,7 +354,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	 */
 	case 1: /* 01b */
 		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			goto drop;
+			return -EINVAL;
 
 		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
 		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
@@ -371,7 +371,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
 		/* Next header is carried inline */
 		if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
-			goto drop;
+			return -EINVAL;
 
 		pr_debug("NH flag is set, next header carried inline: %02x\n",
 			 hdr.nexthdr);
@@ -383,7 +383,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		if (lowpan_fetch_skb(skb, &hdr.hop_limit,
 				     sizeof(hdr.hop_limit)))
-			goto drop;
+			return -EINVAL;
 	}
 
 	/* Extract SAM to the tmp variable */
@@ -402,7 +402,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 
 	/* Check on error of previous branch */
 	if (err)
-		goto drop;
+		return -EINVAL;
 
 	/* Extract DAM to the tmp variable */
 	tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
@@ -417,7 +417,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 								tmp);
 
 			if (err)
-				goto drop;
+				return -EINVAL;
 		}
 	} else {
 		err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
@@ -425,7 +425,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 		pr_debug("dest: stateless compression mode %d dest %pI6c\n",
 			 tmp, &hdr.daddr);
 		if (err)
-			goto drop;
+			return -EINVAL;
 	}
 
 	/* UDP data uncompression */
@@ -434,16 +434,14 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 		const int needed = sizeof(struct udphdr) + sizeof(hdr);
 
 		if (uncompress_udp_header(skb, &uh))
-			goto drop;
+			return -EINVAL;
 
 		/* replace the compressed UDP head by the uncompressed UDP
 		 * header
 		 */
 		err = skb_cow(skb, needed);
-		if (unlikely(err)) {
-			kfree_skb(skb);
+		if (unlikely(err))
 			return err;
-		}
 
 		skb_push(skb, sizeof(struct udphdr));
 		skb_reset_transport_header(skb);
@@ -455,10 +453,8 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 		hdr.nexthdr = UIP_PROTO_UDP;
 	} else {
 		err = skb_cow(skb, sizeof(hdr));
-		if (unlikely(err)) {
-			kfree_skb(skb);
+		if (unlikely(err))
 			return err;
-		}
 	}
 
 	hdr.payload_len = htons(skb->len);
@@ -478,9 +474,6 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 	raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
 
 	return 0;
-drop:
-	kfree_skb(skb);
-	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_decompress);
 