Diffstat (limited to 'net')
-rw-r--r--  net/appletalk/ddp.c    25
-rw-r--r--  net/core/datagram.c    50
-rw-r--r--  net/core/skbuff.c     122
-rw-r--r--  net/core/user_dma.c    25
-rw-r--r--  net/xfrm/xfrm_algo.c   22
5 files changed, 86 insertions, 158 deletions
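
All five files below make the same change to the common sk_buff walker loop: the
running "start" offset (the skb offset at which the current fragment begins) is
dropped, "end" is computed directly from "offset", fragment data is taken from the
fragment's own base instead of at "offset - start", recursion into frag_list skbs
now begins at offset 0, and BUG_TRAP(start <= offset + len) becomes
BUG_TRAP(len >= 0). A schematic sketch condensed from the hunks below;
handle_chunk() is an illustrative placeholder for each caller's copy/checksum
step, not code from the patch:

	/* Old walker shape (what the hunks remove): */
	int start = skb_headlen(skb);
	...
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			/* per-caller work, at offset - start within the fragment */
			handle_chunk(frag, frag->page_offset + offset - start, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	/* New walker shape (what the hunks add): */
	int end = skb_headlen(skb);
	...
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		BUG_TRAP(len >= 0);

		end = offset + frag->size;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			/* per-caller work, now always from the fragment's own base */
			handle_chunk(frag, frag->page_offset, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
	}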
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index f6a92a0b7aa6..16eda21fb38c 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -937,11 +937,11 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
-	int start = skb_headlen(skb);
+	int end = skb_headlen(skb);
 	int i, copy;
 
 	/* checksum stuff in header space */
-	if ( (copy = start - offset) > 0) {
+	if ((copy = end - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,11 +953,9 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -965,36 +963,31 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			sum = atalk_sum_partial(vaddr + frag->page_offset +
-						offset - start, copy, sum);
+			sum = atalk_sum_partial(vaddr + frag->page_offset,
+						copy, sum);
 			kunmap_skb_frag(vaddr);
 
 			if (!(len -= copy))
 				return sum;
 			offset += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				sum = atalk_sum_skb(list, offset - start,
-						    copy, sum);
+				sum = atalk_sum_skb(list, 0, copy, sum);
 				if ((len -= copy) == 0)
 					return sum;
 				offset += copy;
 			}
-			start = end;
 		}
 	}
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb056f476126..e1afa7679445 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -263,11 +263,9 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			int err;
 			u8 *vaddr;
@@ -277,8 +275,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset +
-					     offset - start, copy);
+			err = memcpy_toiovec(to, vaddr + frag->page_offset,
+					     copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -286,30 +284,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 				return 0;
 			offset += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_datagram_iovec(list,
-							    offset - start,
-							    to, copy))
+				if (skb_copy_datagram_iovec(list, 0, to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 			}
-			start = end;
 		}
 	}
 	if (!len)
@@ -323,9 +315,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
 {
-	int start = skb_headlen(skb);
+	int end = skb_headlen(skb);
 	int pos = 0;
-	int i, copy = start - offset;
+	int i, copy = end - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -344,11 +336,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			int err = 0;
@@ -360,8 +350,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap(page);
 			csum2 = csum_and_copy_to_user(vaddr +
-							frag->page_offset +
-							offset - start,
+							frag->page_offset,
 						      to, copy, 0, &err);
 			kunmap(page);
 			if (err)
@@ -373,24 +362,20 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list=list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
-				if (skb_copy_and_csum_datagram(list,
-							       offset - start,
+				if (skb_copy_and_csum_datagram(list, 0,
 							       to, copy,
 							       &csum2))
 					goto fault;
@@ -401,7 +386,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
-			start = end;
 		}
 	}
 	if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 142257307fa2..32f087b5233e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@ pull_pages:
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int i, copy;
-	int start = skb_headlen(skb);
+	int end = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = start - offset) > 0) {
+	if ((copy = end - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,11 +1062,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1075,8 +1073,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset,
+			       copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1084,30 +1082,25 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 			offset += copy;
 			to += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, offset - start,
-						  to, copy))
+				if (skb_copy_bits(list, 0, to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to += copy;
 			}
-			start = end;
 		}
 	}
 	if (!len)
@@ -1132,12 +1125,12 @@ fault:
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
 	int i, copy;
-	int start = skb_headlen(skb);
+	int end = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
-	if ((copy = start - offset) > 0) {
+	if ((copy = end - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1149,11 +1142,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		int end;
-
-		BUG_TRAP(start <= offset + len);
+		BUG_TRAP(len >= 0);
 
-		end = start + frag->size;
+		end = offset + frag->size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1161,8 +1152,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 				copy = len;
 
 			vaddr = kmap_skb_frag(frag);
-			memcpy(vaddr + frag->page_offset + offset - start,
-			       from, copy);
+			memcpy(vaddr + frag->page_offset, from, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1170,30 +1160,25 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			offset += copy;
 			from += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
+			BUG_TRAP(len >= 0);
 
-			BUG_TRAP(start <= offset + len);
-
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_store_bits(list, offset - start,
-						   from, copy))
+				if (skb_store_bits(list, 0, from, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				from += copy;
 			}
-			start = end;
 		}
 	}
 	if (!len)
@@ -1210,8 +1195,8 @@ EXPORT_SYMBOL(skb_store_bits);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		    int len, __wsum csum)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1226,11 +1211,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-
-		BUG_TRAP(start <= offset + len);
+		BUG_TRAP(len >= 0);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1239,8 +1222,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial(vaddr + frag->page_offset +
-					     offset - start, copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset,
+					     copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1248,31 +1231,26 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			offset += copy;
 			pos += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
+			BUG_TRAP(len >= 0);
 
-			BUG_TRAP(start <= offset + len);
-
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, offset - start,
-						     copy, 0);
+				csum2 = skb_checksum(list, 0, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos += copy;
 			}
-			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1285,8 +1263,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			      u8 *to, int len, __wsum csum)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1303,11 +1281,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-
-		BUG_TRAP(start <= offset + len);
+		BUG_TRAP(len >= 0);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1317,9 +1293,8 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial_copy_nocheck(vaddr +
-							  frag->page_offset +
-							  offset - start, to,
-							  copy, 0);
+							  frag->page_offset,
+							  to, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1328,7 +1303,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			to += copy;
 			pos += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
@@ -1336,16 +1310,13 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		for (; list; list = list->next) {
 			__wsum csum2;
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list,
-							       offset - start,
+				csum2 = skb_copy_and_csum_bits(list, 0,
 							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
@@ -1354,7 +1325,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				to += copy;
 				pos += copy;
 			}
-			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -2026,8 +1996,8 @@ void __init skb_init(void)
 int
 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2043,45 +2013,39 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
 			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset+offset-start;
+			sg[elt].offset = frag->page_offset;
 			sg[elt].length = copy;
 			elt++;
 			if (!(len -= copy))
 				return elt;
 			offset += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
+				elt += skb_to_sgvec(list, sg+elt, 0, copy);
 				if ((len -= copy) == 0)
 					return elt;
 				offset += copy;
 			}
-			start = end;
 		}
 	}
 	BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 0ad1cd57bc39..89241cdeea3f 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			struct sk_buff *skb, int offset, struct iovec *to,
 			size_t len, struct dma_pinned_list *pinned_list)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -69,11 +69,9 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -82,8 +80,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 			if (copy > len)
 				copy = len;
 
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
-					frag->page_offset + offset - start, copy);
+			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
+					page, frag->page_offset, copy);
 			if (cookie < 0)
 				goto fault;
 			len -= copy;
@@ -91,25 +89,21 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 				goto end;
 			offset += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			copy = end - offset;
 			if (copy > 0) {
 				if (copy > len)
 					copy = len;
 				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						offset - start, to, copy,
-						pinned_list);
+						0, to, copy, pinned_list);
 				if (cookie < 0)
 					goto fault;
 				len -= copy;
@@ -117,7 +111,6 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 					goto end;
 				offset += copy;
 			}
-			start = end;
 		}
 	}
 
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 6249a9405bb8..be529c4241a6 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		 int offset, int len, icv_update_fn_t icv_update)
 {
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int end = skb_headlen(skb);
+	int i, copy = end - offset;
 	int err;
 	struct scatterlist sg;
 
@@ -556,11 +556,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
+		BUG_TRAP(len >= 0);
 
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = offset + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -568,7 +566,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				copy = len;
 
 			sg.page = frag->page;
-			sg.offset = frag->page_offset + offset-start;
+			sg.offset = frag->page_offset;
 			sg.length = copy;
 
 			err = icv_update(desc, &sg, copy);
@@ -579,22 +577,19 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				return 0;
 			offset += copy;
 		}
-		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
+			BUG_TRAP(len >= 0);
 
-			end = start + list->len;
+			end = offset + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				err = skb_icv_walk(list, desc, offset-start,
+				err = skb_icv_walk(list, desc, 0,
 						   copy, icv_update);
 				if (unlikely(err))
 					return err;
@@ -602,7 +597,6 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 				return 0;
 			offset += copy;
 		}
-			start = end;
 		}
 	}
 	BUG_ON(len);