-rw-r--r--  arch/ia64/lib/csum_partial_copy.c |   2
-rw-r--r--  drivers/scsi/esp_scsi.c           |   9
-rw-r--r--  fs/Kconfig                        |   1
-rw-r--r--  fs/afs/internal.h                 |   2
-rw-r--r--  fs/afs/rxrpc.c                    |   2
-rw-r--r--  fs/afs/use-rtnetlink.c            |   2
-rw-r--r--  fs/afs/vlocation.c                |  17
-rw-r--r--  include/net/wext.h                |   2
-rw-r--r--  kernel/hrtimer.c                  |   2
-rw-r--r--  net/appletalk/ddp.c               |  25
-rw-r--r--  net/core/datagram.c               |  50
-rw-r--r--  net/core/skbuff.c                 | 122
-rw-r--r--  net/core/user_dma.c               |  25
-rw-r--r--  net/rxrpc/Kconfig                 |   5
-rw-r--r--  net/rxrpc/rxkad.c                 |   1
-rw-r--r--  net/xfrm/xfrm_algo.c              |  22
16 files changed, 186 insertions, 103 deletions
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 503dfe6d1450..118daf5a0632 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -128,6 +128,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 	return (__force __wsum)result;
 }
 
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 3cd5bf723da4..99ce03331b64 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/irqreturn.h>
 
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -1706,17 +1707,17 @@ again:
 	if (!dma_len) {
 		printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
 		       esp->host->unique_id);
-		printk(KERN_ERR PFX "esp%d: cur adr[%08x] len[%08x]\n",
+		printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
 		       esp->host->unique_id,
-		       esp_cur_dma_addr(ent, cmd),
+		       (unsigned long long)esp_cur_dma_addr(ent, cmd),
 		       esp_cur_dma_len(ent, cmd));
 		esp_schedule_reset(esp);
 		return 0;
 	}
 
-	esp_log_datastart("ESP: start data addr[%08x] len[%u] "
+	esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
 			  "write(%d)\n",
-			  dma_addr, dma_len, write);
+			  (unsigned long long)dma_addr, dma_len, write);
 
 	esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
 				write, ESP_CMD_DMA | ESP_CMD_TI);
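The format-string change above is needed because dma_addr_t is 32 bits wide on some configurations and 64 on others, so no single printk specifier matches it everywhere; the portable idiom is to cast to unsigned long long and print with %llx. A minimal userspace sketch of that idiom (the fake_dma_addr_t typedef is invented here to stand in for the kernel-only type):

```c
#include <stdio.h>

/* Stand-in for the kernel's dma_addr_t, whose width varies by
 * architecture and config; invented for illustration only. */
typedef unsigned long long fake_dma_addr_t;

static void report_dma(fake_dma_addr_t addr, unsigned int len)
{
	/* Cast to unsigned long long so %llx matches on every build. */
	printf("cur adr[%08llx] len[%08x]\n",
	       (unsigned long long)addr, len);
}

int main(void)
{
	report_dma(0x1deadbeefULL, 0x200);
	return 0;
}
```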
diff --git a/fs/Kconfig b/fs/Kconfig
index e33c08924572..a42f767dcdd5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -2020,6 +2020,7 @@ config AFS_FS
 	tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
 	select AF_RXRPC
+	select KEYS
 	help
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6dd3197d1d8d..34665f7d7a19 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -367,7 +367,7 @@ struct afs_uuid {
 	u32		time_low;			/* low part of timestamp */
 	u16		time_mid;			/* mid part of timestamp */
 	u16		time_hi_and_version;		/* high part of timestamp and version */
-#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000
+#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL
 #define AFS_UUID_TIMEHI_MASK	0x0fff
 #define AFS_UUID_VERSION_TIME	0x1000	/* time-based UUID */
 #define AFS_UUID_VERSION_NAME	0x3000	/* name-based UUID */
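For context on the ULL suffix: 0x01b21dd213814000 does not fit in 32 bits, and the suffix pins the constant's type to unsigned long long on every target instead of leaving it to promotion rules (which warn or truncate on 32-bit builds). A small sketch, compilable anywhere:

```c
#include <stdio.h>

/* The suffix guarantees a 64-bit unsigned type even on 32-bit
 * targets, avoiding truncation and overflow warnings. */
#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL

int main(void)
{
	unsigned long long t = AFS_UUID_TO_UNIX_TIME;

	printf("%llu 100ns intervals between UUID and Unix epochs\n", t);
	return 0;
}
```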
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e7b047328a39..222c1a3abbb8 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -772,7 +772,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
 
 	if (call->offset < count) {
 		if (last) {
-			_leave(" = -EBADMSG [%d < %lu]", call->offset, count);
+			_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
 			return -EBADMSG;
 		}
 		_leave(" = -EAGAIN");
diff --git a/fs/afs/use-rtnetlink.c b/fs/afs/use-rtnetlink.c
index 82f0daa28970..f8991c700e02 100644
--- a/fs/afs/use-rtnetlink.c
+++ b/fs/afs/use-rtnetlink.c
@@ -243,7 +243,7 @@ static int afs_read_rtm(struct afs_rtm_desc *desc)
 	desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
 				       desc->datamax, 0);
 	if (desc->datalen < 0) {
-		_leave(" = %ld [recv]", desc->datalen);
+		_leave(" = %zd [recv]", desc->datalen);
 		return desc->datalen;
 	}
 
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 74cce174882a..6c8e95a7c2c9 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -416,8 +416,8 @@ fill_in_record:
 		goto error_abandon;
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_VALID;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* schedule for regular updates */
 	afs_vlocation_queue_for_updates(vl);
@@ -442,7 +442,7 @@ found_in_memory:
 
 	_debug("invalid [state %d]", state);
 
-	if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) {
+	if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
 		vl->state = AFS_VL_CREATING;
 		spin_unlock(&vl->lock);
 		goto fill_in_record;
@@ -453,11 +453,10 @@ found_in_memory:
 	_debug("wait");
 
 	spin_unlock(&vl->lock);
-	ret = wait_event_interruptible(
-		vl->waitq,
-		vl->state == AFS_VL_NEW ||
-		vl->state == AFS_VL_VALID ||
-		vl->state == AFS_VL_NO_VOLUME);
+	ret = wait_event_interruptible(vl->waitq,
+				       vl->state == AFS_VL_NEW ||
+				       vl->state == AFS_VL_VALID ||
+				       vl->state == AFS_VL_NO_VOLUME);
 	if (ret < 0)
 		goto error;
 	spin_lock(&vl->lock);
@@ -471,8 +470,8 @@ success:
 error_abandon:
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_NEW;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 error:
 	ASSERT(vl != NULL);
 	afs_put_vlocation(vl);
@@ -675,7 +674,6 @@ static void afs_vlocation_updater(struct work_struct *work)
 	case 0:
 		afs_vlocation_apply_update(vl, &vldb);
 		vl->state = AFS_VL_VALID;
-		wake_up(&vl->waitq);
 		break;
 	case -ENOMEDIUM:
 		vl->state = AFS_VL_VOLUME_DELETED;
@@ -685,6 +683,7 @@ static void afs_vlocation_updater(struct work_struct *work)
 		break;
 	}
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* and then reschedule */
 	_debug("reschedule");
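The recurring change in this file moves wake_up() from inside the vl->lock section to just after the unlock, so a woken waiter does not immediately contend on a lock the waker still holds. A toy pthread rendering of the same ordering (names invented; the kernel primitives differ in detail, and kernel wait queues do not require this ordering for correctness):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static int state;

static void *updater(void *arg)
{
	pthread_mutex_lock(&lock);
	state = 1;			/* vl->state = AFS_VL_VALID; */
	pthread_mutex_unlock(&lock);	/* spin_unlock(&vl->lock); */
	pthread_cond_broadcast(&waitq);	/* wake_up(&vl->waitq); */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, updater, NULL);
	pthread_mutex_lock(&lock);
	while (!state)			/* wait_event-style recheck loop */
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("state observed");
	return 0;
}
```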
diff --git a/include/net/wext.h b/include/net/wext.h
index 55741836a675..c02b8decf3af 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -10,7 +10,7 @@ extern int wext_proc_init(void);
 extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
 			     void __user *arg);
 #else
-static inline int wext_proc_init()
+static inline int wext_proc_init(void)
 {
 	return 0;
 }
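The (void) fix matters in C: an empty () parameter list declares a function taking unspecified arguments, so a call with stray arguments still compiles, whereas (void) makes it a hard error. A compilable sketch:

```c
#include <stdio.h>

/* With `static inline int wext_proc_init()`, a call such as
 * wext_proc_init(42) would still compile in C; the explicit
 * (void) turns that into a compile-time error. */
static inline int wext_proc_init(void)
{
	return 0;
}

int main(void)
{
	printf("%d\n", wext_proc_init());
	return 0;
}
```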
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f5cfde8c9025..1b3033105b40 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -279,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 
 	return ktime_add(kt, tmp);
 }
+
+EXPORT_SYMBOL_GPL(ktime_add_ns);
 # endif /* !CONFIG_KTIME_SCALAR */
 
 /*
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 16eda21fb38c..f6a92a0b7aa6 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -937,11 +937,11 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int i, copy;
 
 	/* checksum stuff in header space */
-	if ((copy = end - offset) > 0) {
+	if ( (copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,9 +953,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -963,31 +965,36 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			sum = atalk_sum_partial(vaddr + frag->page_offset,
-						copy, sum);
+			sum = atalk_sum_partial(vaddr + frag->page_offset +
+						offset - start, copy, sum);
 			kunmap_skb_frag(vaddr);
 
 			if (!(len -= copy))
 				return sum;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				sum = atalk_sum_skb(list, 0, copy, sum);
+				sum = atalk_sum_skb(list, offset - start,
+						    copy, sum);
 				if ((len -= copy) == 0)
 					return sum;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 
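All of the skb-walking hunks here and below restore the same start/end bookkeeping: start counts the bytes that precede the current block, end = start + block size, and offset - start is the position *within* the block, which the broken version dropped when indexing into a fragment or recursing into the frag_list. A self-contained userspace sketch of the pattern, with a toy_skb type invented purely for illustration:

```c
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for an skb with a linear head plus fragments. */
struct frag { const char *data; int size; };
struct toy_skb {
	const char *head; int headlen;
	struct frag frags[4]; int nr_frags;
};

/* Copy `len` bytes starting at `offset`, mirroring the start/end walk
 * the patch restores. */
static int toy_copy_bits(const struct toy_skb *skb, int offset,
			 char *to, int len)
{
	int start = skb->headlen;
	int i, copy = start - offset;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->head + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb->nr_frags; i++) {
		int end = start + skb->frags[i].size;

		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			/* offset - start is the offset inside this frag. */
			memcpy(to, skb->frags[i].data + offset - start, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	return len ? -1 : 0;
}

int main(void)
{
	struct toy_skb skb = {
		.head = "head", .headlen = 4,
		.frags = { { "frag0", 5 }, { "frag1", 5 } },
		.nr_frags = 2,
	};
	char buf[8] = { 0 };

	/* Read 6 bytes starting inside frag0 (offset 6 = head + 2). */
	if (toy_copy_bits(&skb, 6, buf, 6) == 0)
		printf("%.6s\n", buf);	/* prints "ag0fra" */
	return 0;
}
```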
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e1afa7679445..cb056f476126 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -263,9 +263,11 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			int err;
 			u8 *vaddr;
@@ -275,8 +277,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset,
-					     copy);
+			err = memcpy_toiovec(to, vaddr + frag->page_offset +
+					     offset - start, copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -284,24 +286,30 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_datagram_iovec(list, 0, to, copy))
+				if (skb_copy_datagram_iovec(list,
+							    offset - start,
+							    to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -315,9 +323,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				       u8 __user *to, int len,
 				       __wsum *csump)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int pos = 0;
-	int i, copy = end - offset;
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -336,9 +344,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			int err = 0;
@@ -350,7 +360,8 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap(page);
 			csum2 = csum_and_copy_to_user(vaddr +
-							frag->page_offset,
+							frag->page_offset +
+							offset - start,
 						      to, copy, 0, &err);
 			kunmap(page);
 			if (err)
@@ -362,20 +373,24 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list=list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
-				if (skb_copy_and_csum_datagram(list, 0,
+				if (skb_copy_and_csum_datagram(list,
+							       offset - start,
 							       to, copy,
 							       &csum2))
 					goto fault;
@@ -386,6 +401,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32f087b5233e..142257307fa2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@ pull_pages:
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,9 +1062,11 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1073,8 +1075,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset,
-			       copy);
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
+			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1082,25 +1084,30 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			offset += copy;
 			to += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, 0, to, copy))
+				if (skb_copy_bits(list, offset - start,
+						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1125,12 +1132,12 @@ fault:
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1142,9 +1149,11 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + frag->size;
+		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1152,7 +1161,8 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(frag);
-			memcpy(vaddr + frag->page_offset, from, copy);
+			memcpy(vaddr + frag->page_offset + offset - start,
+			       from, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1160,25 +1170,30 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			offset += copy;
 			from += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_store_bits(list, 0, from, copy))
+				if (skb_store_bits(list, offset - start,
+						   from, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				from += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1195,8 +1210,8 @@ EXPORT_SYMBOL(skb_store_bits);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		    int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1211,9 +1226,11 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1222,8 +1239,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial(vaddr + frag->page_offset,
-					     copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset +
+					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1231,26 +1248,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			offset += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, 0, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
+						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1263,8 +1285,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			      u8 *to, int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1281,9 +1303,11 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
+
+		BUG_TRAP(start <= offset + len);
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1293,8 +1317,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial_copy_nocheck(vaddr +
-							  frag->page_offset,
-							  to, copy, 0);
+							  frag->page_offset +
+							  offset - start, to,
+							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1303,6 +1328,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
@@ -1310,13 +1336,16 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		for (; list; list = list->next) {
 			__wsum csum2;
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, 0,
+				csum2 = skb_copy_and_csum_bits(list,
+							       offset - start,
 							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
@@ -1325,6 +1354,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1996,8 +2026,8 @@ void __init skb_init(void)
 int
 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2013,39 +2043,45 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
 			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset;
+			sg[elt].offset = frag->page_offset+offset-start;
 			sg[elt].length = copy;
 			elt++;
 			if (!(len -= copy))
 				return elt;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, 0, copy);
+				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
 				if ((len -= copy) == 0)
 					return elt;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 89241cdeea3f..0ad1cd57bc39 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -69,9 +69,11 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -80,8 +82,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			if (copy > len)
 				copy = len;
 
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
-					page, frag->page_offset, copy);
+			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
+					frag->page_offset + offset - start, copy);
 			if (cookie < 0)
 				goto fault;
 			len -= copy;
@@ -89,21 +91,25 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 				goto end;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			copy = end - offset;
 			if (copy > 0) {
 				if (copy > len)
 					copy = len;
 				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						0, to, copy, pinned_list);
+						offset - start, to, copy,
+						pinned_list);
 				if (cookie < 0)
 					goto fault;
 				len -= copy;
@@ -111,6 +117,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 					goto end;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index d72380e304ae..8750f6da6bc7 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -30,6 +30,11 @@ config AF_RXRPC_DEBUG
 config RXKAD
 	tristate "RxRPC Kerberos security"
 	depends on AF_RXRPC && KEYS
+	select CRYPTO
+	select CRYPTO_MANAGER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_PCBC
+	select CRYPTO_FCRYPT
 	help
 	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
 	  through the use of the key retention service.
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 1eaf529efac1..5ec705144e10 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -18,6 +18,7 @@
 #include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION			2
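The added #define renames the rxrpc_debug knob declared by the shared ar-internal.h header before it is included, so this module gets its own rxkad_debug symbol instead of clashing with the core module's. A minimal sketch of the preprocessor trick, with the shared header's declaration inlined for illustration:

```c
#include <stdio.h>

/* Rename before "including the shared header": every mention of
 * rxrpc_debug below now really names rxkad_debug. */
#define rxrpc_debug rxkad_debug

/* Stand-in for the declaration the shared header would provide. */
unsigned int rxrpc_debug;	/* actually defines rxkad_debug */

int main(void)
{
	rxkad_debug = 1;	/* the same object, under its real name */
	printf("rxkad_debug=%u\n", rxrpc_debug);
	return 0;
}
```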
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index be529c4241a6..6249a9405bb8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		 int offset, int len, icv_update_fn_t icv_update)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int err;
 	struct scatterlist sg;
 
@@ -556,9 +556,11 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -566,7 +568,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				copy = len;
 
 			sg.page = frag->page;
-			sg.offset = frag->page_offset;
+			sg.offset = frag->page_offset + offset-start;
 			sg.length = copy;
 
 			err = icv_update(desc, &sg, copy);
@@ -577,19 +579,22 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
+
+			BUG_TRAP(start <= offset + len);
 
-			end = offset + list->len;
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				err = skb_icv_walk(list, desc, 0,
+				err = skb_icv_walk(list, desc, offset-start,
 						   copy, icv_update);
 				if (unlikely(err))
 					return err;
@@ -597,6 +602,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);