author    Herbert Xu <herbert@gondor.apana.org.au>  2005-04-20 01:30:14 -0400
committer David S. Miller <davem@davemloft.net>     2005-04-20 01:30:14 -0400
commit    357b40a18b04c699da1d45608436e9b76b50e251 (patch)
tree      51c4480c9508a911d52a3f69bbe84ec1191fd202
parent    fd92833a52b972aafacced959f4a3f7541936a9b (diff)
[IPV6]: IPV6_CHECKSUM socket option can corrupt kernel memory

So here is a patch that introduces skb_store_bits -- the opposite of
skb_copy_bits, and uses them to read/write the csum field in rawv6.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/skbuff.h |  2
-rw-r--r--  net/core/skbuff.c      | 88
-rw-r--r--  net/ipv6/raw.c         | 53
3 files changed, 130 insertions, 13 deletions
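
As a quick illustration of how the new helper is meant to pair with skb_copy_bits() (a minimal sketch, not part of the patch; the wrapper update_u16_at_offset() and the 16-bit field it touches are purely hypothetical):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/skbuff.h>

/* Read-modify-write a 16-bit value that may live anywhere in the skb:
 * in the linear area, inside a page fragment, or in a chained skb on
 * the frag_list.  The offset is relative to skb->data. */
static int update_u16_at_offset(struct sk_buff *skb, int offset, u16 new_val)
{
        u16 old_val;

        if (skb_copy_bits(skb, offset, &old_val, sizeof(old_val)))
                return -EFAULT;         /* offset runs past the end of the skb */

        if (old_val == new_val)
                return 0;

        /* Write back through the fragment-aware helper instead of
         * dereferencing skb->data + offset directly. */
        return skb_store_bits(skb, offset, &new_val, sizeof(new_val));
}

The point is that callers never form raw pointers into the skb themselves: an offset that lands in a fragment is handled, and an out-of-range offset fails with -EFAULT instead of scribbling over adjacent kernel memory.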
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index aa35797ebfbf..9f2d75e4f087 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1183,6 +1183,8 @@ extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
                                            int len, unsigned int csum);
 extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
                                      void *to, int len);
+extern int             skb_store_bits(const struct sk_buff *skb, int offset,
+                                      void *from, int len);
 extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
                                               int offset, u8 *to, int len,
                                               unsigned int csum);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bf02ca9f80ac..c96559574a3f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -985,6 +985,94 @@ fault:
         return -EFAULT;
 }
 
+/**
+ *      skb_store_bits - store bits from kernel buffer to skb
+ *      @skb: destination buffer
+ *      @offset: offset in destination
+ *      @from: source buffer
+ *      @len: number of bytes to copy
+ *
+ *      Copy the specified number of bytes from the source buffer to the
+ *      destination skb.  This function handles all the messy bits of
+ *      traversing fragment lists and such.
+ */
+
+int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
+{
+        int i, copy;
+        int start = skb_headlen(skb);
+
+        if (offset > (int)skb->len - len)
+                goto fault;
+
+        if ((copy = start - offset) > 0) {
+                if (copy > len)
+                        copy = len;
+                memcpy(skb->data + offset, from, copy);
+                if ((len -= copy) == 0)
+                        return 0;
+                offset += copy;
+                from += copy;
+        }
+
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                int end;
+
+                BUG_TRAP(start <= offset + len);
+
+                end = start + frag->size;
+                if ((copy = end - offset) > 0) {
+                        u8 *vaddr;
+
+                        if (copy > len)
+                                copy = len;
+
+                        vaddr = kmap_skb_frag(frag);
+                        memcpy(vaddr + frag->page_offset + offset - start,
+                               from, copy);
+                        kunmap_skb_frag(vaddr);
+
+                        if ((len -= copy) == 0)
+                                return 0;
+                        offset += copy;
+                        from += copy;
+                }
+                start = end;
+        }
+
+        if (skb_shinfo(skb)->frag_list) {
+                struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+                for (; list; list = list->next) {
+                        int end;
+
+                        BUG_TRAP(start <= offset + len);
+
+                        end = start + list->len;
+                        if ((copy = end - offset) > 0) {
+                                if (copy > len)
+                                        copy = len;
+                                if (skb_store_bits(list, offset - start,
+                                                   from, copy))
+                                        goto fault;
+                                if ((len -= copy) == 0)
+                                        return 0;
+                                offset += copy;
+                                from += copy;
+                        }
+                        start = end;
+                }
+        }
+        if (!len)
+                return 0;
+
+fault:
+        return -EFAULT;
+}
+
+EXPORT_SYMBOL(skb_store_bits);
+
 /* Checksum skb data. */
 
 unsigned int skb_checksum(const struct sk_buff *skb, int offset,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5488ad0de4f6..3e2ad0a70412 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -34,6 +34,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/bug.h>
 
 #include <net/ip.h>
 #include <net/sock.h>
@@ -452,12 +453,15 @@ csum_copy_err:
 }
 
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
-                                     struct raw6_sock *rp, int len)
+                                     struct raw6_sock *rp)
 {
+        struct inet_sock *inet = inet_sk(sk);
         struct sk_buff *skb;
         int err = 0;
-        u16 *csum;
+        int offset;
+        int len;
         u32 tmp_csum;
+        u16 csum;
 
         if (!rp->checksum)
                 goto send;
@@ -465,10 +469,10 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
         if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
                 goto out;
 
-        if (rp->offset + 1 < len)
-                csum = (u16 *)(skb->h.raw + rp->offset);
-        else {
+        offset = rp->offset;
+        if (offset >= inet->cork.length - 1) {
                 err = -EINVAL;
+                ip6_flush_pending_frames(sk);
                 goto out;
         }
 
@@ -479,23 +483,46 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
          */
                 tmp_csum = skb->csum;
         } else {
+                struct sk_buff *csum_skb = NULL;
                 tmp_csum = 0;
 
                 skb_queue_walk(&sk->sk_write_queue, skb) {
                         tmp_csum = csum_add(tmp_csum, skb->csum);
+
+                        if (csum_skb)
+                                continue;
+
+                        len = skb->len - (skb->h.raw - skb->data);
+                        if (offset >= len) {
+                                offset -= len;
+                                continue;
+                        }
+
+                        csum_skb = skb;
                 }
+
+                skb = csum_skb;
         }
 
+        offset += skb->h.raw - skb->data;
+        if (skb_copy_bits(skb, offset, &csum, 2))
+                BUG();
+
         /* in case cksum was not initialized */
-        if (unlikely(*csum))
-                tmp_csum = csum_sub(tmp_csum, *csum);
+        if (unlikely(csum))
+                tmp_csum = csum_sub(tmp_csum, csum);
+
+        tmp_csum = csum_ipv6_magic(&fl->fl6_src,
+                                   &fl->fl6_dst,
+                                   inet->cork.length, fl->proto, tmp_csum);
+
+        if (tmp_csum == 0)
+                tmp_csum = -1;
 
-        *csum = csum_ipv6_magic(&fl->fl6_src,
-                                &fl->fl6_dst,
-                                len, fl->proto, tmp_csum);
+        csum = tmp_csum;
+        if (skb_store_bits(skb, offset, &csum, 2))
+                BUG();
 
-        if (*csum == 0)
-                *csum = -1;
 send:
         err = ip6_push_pending_frames(sk);
 out:
@@ -774,7 +801,7 @@ back_from_confirm:
                 if (err)
                         ip6_flush_pending_frames(sk);
                 else if (!(msg->msg_flags & MSG_MORE))
-                        err = rawv6_push_pending_frames(sk, &fl, rp, len);
+                        err = rawv6_push_pending_frames(sk, &fl, rp);
         }
 done:
         ip6_dst_store(sk, dst,
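
For readers following the raw.c hunk, the checksum fix-up reduces to the sequence below. This is a simplified sketch only: it assumes the caller has already located the queued skb that holds the checksum offset and accumulated tmp_csum over the write queue with csum_add(); the wrapper name rawv6_fixup_csum_sketch() is illustrative and the header choices are indicative, while skb_copy_bits(), skb_store_bits(), csum_sub() and csum_ipv6_magic() are the kernel primitives the patch actually uses.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow.h>
#include <net/ip6_checksum.h>

static int rawv6_fixup_csum_sketch(struct sk_buff *skb, int offset,
                                   u32 tmp_csum, struct flowi *fl,
                                   int payload_len)
{
        u16 csum;

        /* Read whatever the application already wrote at the checksum
         * offset; the bytes may sit in a fragment, so no direct pointer. */
        if (skb_copy_bits(skb, offset, &csum, 2))
                return -EFAULT;

        /* Back the stale value out of the accumulated sum. */
        if (csum)
                tmp_csum = csum_sub(tmp_csum, csum);

        /* Fold in the IPv6 pseudo-header over the whole corked length. */
        csum = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
                               payload_len, fl->proto, tmp_csum);
        if (csum == 0)
                csum = 0xffff;  /* same ones'-complement value, never send 0 */

        /* Store the result through the new fragment-safe helper. */
        return skb_store_bits(skb, offset, &csum, 2);
}

The 0 -> 0xffff step mirrors the patch's "tmp_csum = -1;" assignment: truncated to 16 bits they are the same value, and it keeps an all-zero checksum off the wire.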