aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2007-04-27 18:21:23 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-04-27 18:21:23 -0400
commit1a028e50729b85d0a038fad13daf0ee201a37454 (patch)
treea4ffbf7c5476203e1db51b4bd9d076c69d772697 /net/core
parent50f732ee63b91eb08a29974b36bd63e1150bb642 (diff)
[NET]: Revert sk_buff walker cleanups.
This reverts eefa3906283a2b60a6d02a2cda593a7d7d7946c5. The simplification made in that change works with the assumption that the 'offset' parameter to these functions is always positive or zero, which is not true. It can be and often is negative in order to access SKB header values in front of skb->data. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/datagram.c50
-rw-r--r--net/core/skbuff.c122
-rw-r--r--net/core/user_dma.c25
3 files changed, 128 insertions, 69 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e1afa7679445..cb056f476126 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
247int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, 247int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
248 struct iovec *to, int len) 248 struct iovec *to, int len)
249{ 249{
250 int end = skb_headlen(skb); 250 int start = skb_headlen(skb);
251 int i, copy = end - offset; 251 int i, copy = start - offset;
252 252
253 /* Copy header. */ 253 /* Copy header. */
254 if (copy > 0) { 254 if (copy > 0) {
@@ -263,9 +263,11 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
263 263
264 /* Copy paged appendix. Hmm... why does this look so complicated? */ 264 /* Copy paged appendix. Hmm... why does this look so complicated? */
265 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 265 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
266 BUG_TRAP(len >= 0); 266 int end;
267 267
268 end = offset + skb_shinfo(skb)->frags[i].size; 268 BUG_TRAP(start <= offset + len);
269
270 end = start + skb_shinfo(skb)->frags[i].size;
269 if ((copy = end - offset) > 0) { 271 if ((copy = end - offset) > 0) {
270 int err; 272 int err;
271 u8 *vaddr; 273 u8 *vaddr;
@@ -275,8 +277,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
275 if (copy > len) 277 if (copy > len)
276 copy = len; 278 copy = len;
277 vaddr = kmap(page); 279 vaddr = kmap(page);
278 err = memcpy_toiovec(to, vaddr + frag->page_offset, 280 err = memcpy_toiovec(to, vaddr + frag->page_offset +
279 copy); 281 offset - start, copy);
280 kunmap(page); 282 kunmap(page);
281 if (err) 283 if (err)
282 goto fault; 284 goto fault;
@@ -284,24 +286,30 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
284 return 0; 286 return 0;
285 offset += copy; 287 offset += copy;
286 } 288 }
289 start = end;
287 } 290 }
288 291
289 if (skb_shinfo(skb)->frag_list) { 292 if (skb_shinfo(skb)->frag_list) {
290 struct sk_buff *list = skb_shinfo(skb)->frag_list; 293 struct sk_buff *list = skb_shinfo(skb)->frag_list;
291 294
292 for (; list; list = list->next) { 295 for (; list; list = list->next) {
293 BUG_TRAP(len >= 0); 296 int end;
297
298 BUG_TRAP(start <= offset + len);
294 299
295 end = offset + list->len; 300 end = start + list->len;
296 if ((copy = end - offset) > 0) { 301 if ((copy = end - offset) > 0) {
297 if (copy > len) 302 if (copy > len)
298 copy = len; 303 copy = len;
299 if (skb_copy_datagram_iovec(list, 0, to, copy)) 304 if (skb_copy_datagram_iovec(list,
305 offset - start,
306 to, copy))
300 goto fault; 307 goto fault;
301 if ((len -= copy) == 0) 308 if ((len -= copy) == 0)
302 return 0; 309 return 0;
303 offset += copy; 310 offset += copy;
304 } 311 }
312 start = end;
305 } 313 }
306 } 314 }
307 if (!len) 315 if (!len)
@@ -315,9 +323,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
315 u8 __user *to, int len, 323 u8 __user *to, int len,
316 __wsum *csump) 324 __wsum *csump)
317{ 325{
318 int end = skb_headlen(skb); 326 int start = skb_headlen(skb);
319 int pos = 0; 327 int pos = 0;
320 int i, copy = end - offset; 328 int i, copy = start - offset;
321 329
322 /* Copy header. */ 330 /* Copy header. */
323 if (copy > 0) { 331 if (copy > 0) {
@@ -336,9 +344,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
336 } 344 }
337 345
338 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 346 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
339 BUG_TRAP(len >= 0); 347 int end;
340 348
341 end = offset + skb_shinfo(skb)->frags[i].size; 349 BUG_TRAP(start <= offset + len);
350
351 end = start + skb_shinfo(skb)->frags[i].size;
342 if ((copy = end - offset) > 0) { 352 if ((copy = end - offset) > 0) {
343 __wsum csum2; 353 __wsum csum2;
344 int err = 0; 354 int err = 0;
@@ -350,7 +360,8 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
350 copy = len; 360 copy = len;
351 vaddr = kmap(page); 361 vaddr = kmap(page);
352 csum2 = csum_and_copy_to_user(vaddr + 362 csum2 = csum_and_copy_to_user(vaddr +
353 frag->page_offset, 363 frag->page_offset +
364 offset - start,
354 to, copy, 0, &err); 365 to, copy, 0, &err);
355 kunmap(page); 366 kunmap(page);
356 if (err) 367 if (err)
@@ -362,20 +373,24 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
362 to += copy; 373 to += copy;
363 pos += copy; 374 pos += copy;
364 } 375 }
376 start = end;
365 } 377 }
366 378
367 if (skb_shinfo(skb)->frag_list) { 379 if (skb_shinfo(skb)->frag_list) {
368 struct sk_buff *list = skb_shinfo(skb)->frag_list; 380 struct sk_buff *list = skb_shinfo(skb)->frag_list;
369 381
370 for (; list; list=list->next) { 382 for (; list; list=list->next) {
371 BUG_TRAP(len >= 0); 383 int end;
384
385 BUG_TRAP(start <= offset + len);
372 386
373 end = offset + list->len; 387 end = start + list->len;
374 if ((copy = end - offset) > 0) { 388 if ((copy = end - offset) > 0) {
375 __wsum csum2 = 0; 389 __wsum csum2 = 0;
376 if (copy > len) 390 if (copy > len)
377 copy = len; 391 copy = len;
378 if (skb_copy_and_csum_datagram(list, 0, 392 if (skb_copy_and_csum_datagram(list,
393 offset - start,
379 to, copy, 394 to, copy,
380 &csum2)) 395 &csum2))
381 goto fault; 396 goto fault;
@@ -386,6 +401,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
386 to += copy; 401 to += copy;
387 pos += copy; 402 pos += copy;
388 } 403 }
404 start = end;
389 } 405 }
390 } 406 }
391 if (!len) 407 if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32f087b5233e..142257307fa2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@ pull_pages:
1045int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1045int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1046{ 1046{
1047 int i, copy; 1047 int i, copy;
1048 int end = skb_headlen(skb); 1048 int start = skb_headlen(skb);
1049 1049
1050 if (offset > (int)skb->len - len) 1050 if (offset > (int)skb->len - len)
1051 goto fault; 1051 goto fault;
1052 1052
1053 /* Copy header. */ 1053 /* Copy header. */
1054 if ((copy = end - offset) > 0) { 1054 if ((copy = start - offset) > 0) {
1055 if (copy > len) 1055 if (copy > len)
1056 copy = len; 1056 copy = len;
1057 skb_copy_from_linear_data_offset(skb, offset, to, copy); 1057 skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,9 +1062,11 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1062 } 1062 }
1063 1063
1064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1064 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1065 BUG_TRAP(len >= 0); 1065 int end;
1066 1066
1067 end = offset + skb_shinfo(skb)->frags[i].size; 1067 BUG_TRAP(start <= offset + len);
1068
1069 end = start + skb_shinfo(skb)->frags[i].size;
1068 if ((copy = end - offset) > 0) { 1070 if ((copy = end - offset) > 0) {
1069 u8 *vaddr; 1071 u8 *vaddr;
1070 1072
@@ -1073,8 +1075,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1073 1075
1074 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1076 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1075 memcpy(to, 1077 memcpy(to,
1076 vaddr + skb_shinfo(skb)->frags[i].page_offset, 1078 vaddr + skb_shinfo(skb)->frags[i].page_offset+
1077 copy); 1079 offset - start, copy);
1078 kunmap_skb_frag(vaddr); 1080 kunmap_skb_frag(vaddr);
1079 1081
1080 if ((len -= copy) == 0) 1082 if ((len -= copy) == 0)
@@ -1082,25 +1084,30 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1082 offset += copy; 1084 offset += copy;
1083 to += copy; 1085 to += copy;
1084 } 1086 }
1087 start = end;
1085 } 1088 }
1086 1089
1087 if (skb_shinfo(skb)->frag_list) { 1090 if (skb_shinfo(skb)->frag_list) {
1088 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1091 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1089 1092
1090 for (; list; list = list->next) { 1093 for (; list; list = list->next) {
1091 BUG_TRAP(len >= 0); 1094 int end;
1095
1096 BUG_TRAP(start <= offset + len);
1092 1097
1093 end = offset + list->len; 1098 end = start + list->len;
1094 if ((copy = end - offset) > 0) { 1099 if ((copy = end - offset) > 0) {
1095 if (copy > len) 1100 if (copy > len)
1096 copy = len; 1101 copy = len;
1097 if (skb_copy_bits(list, 0, to, copy)) 1102 if (skb_copy_bits(list, offset - start,
1103 to, copy))
1098 goto fault; 1104 goto fault;
1099 if ((len -= copy) == 0) 1105 if ((len -= copy) == 0)
1100 return 0; 1106 return 0;
1101 offset += copy; 1107 offset += copy;
1102 to += copy; 1108 to += copy;
1103 } 1109 }
1110 start = end;
1104 } 1111 }
1105 } 1112 }
1106 if (!len) 1113 if (!len)
@@ -1125,12 +1132,12 @@ fault:
1125int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1132int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1126{ 1133{
1127 int i, copy; 1134 int i, copy;
1128 int end = skb_headlen(skb); 1135 int start = skb_headlen(skb);
1129 1136
1130 if (offset > (int)skb->len - len) 1137 if (offset > (int)skb->len - len)
1131 goto fault; 1138 goto fault;
1132 1139
1133 if ((copy = end - offset) > 0) { 1140 if ((copy = start - offset) > 0) {
1134 if (copy > len) 1141 if (copy > len)
1135 copy = len; 1142 copy = len;
1136 skb_copy_to_linear_data_offset(skb, offset, from, copy); 1143 skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1142,9 +1149,11 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1142 1149
1143 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1150 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1144 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1151 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1145 BUG_TRAP(len >= 0); 1152 int end;
1153
1154 BUG_TRAP(start <= offset + len);
1146 1155
1147 end = offset + frag->size; 1156 end = start + frag->size;
1148 if ((copy = end - offset) > 0) { 1157 if ((copy = end - offset) > 0) {
1149 u8 *vaddr; 1158 u8 *vaddr;
1150 1159
@@ -1152,7 +1161,8 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1152 copy = len; 1161 copy = len;
1153 1162
1154 vaddr = kmap_skb_frag(frag); 1163 vaddr = kmap_skb_frag(frag);
1155 memcpy(vaddr + frag->page_offset, from, copy); 1164 memcpy(vaddr + frag->page_offset + offset - start,
1165 from, copy);
1156 kunmap_skb_frag(vaddr); 1166 kunmap_skb_frag(vaddr);
1157 1167
1158 if ((len -= copy) == 0) 1168 if ((len -= copy) == 0)
@@ -1160,25 +1170,30 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1160 offset += copy; 1170 offset += copy;
1161 from += copy; 1171 from += copy;
1162 } 1172 }
1173 start = end;
1163 } 1174 }
1164 1175
1165 if (skb_shinfo(skb)->frag_list) { 1176 if (skb_shinfo(skb)->frag_list) {
1166 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1177 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1167 1178
1168 for (; list; list = list->next) { 1179 for (; list; list = list->next) {
1169 BUG_TRAP(len >= 0); 1180 int end;
1170 1181
1171 end = offset + list->len; 1182 BUG_TRAP(start <= offset + len);
1183
1184 end = start + list->len;
1172 if ((copy = end - offset) > 0) { 1185 if ((copy = end - offset) > 0) {
1173 if (copy > len) 1186 if (copy > len)
1174 copy = len; 1187 copy = len;
1175 if (skb_store_bits(list, 0, from, copy)) 1188 if (skb_store_bits(list, offset - start,
1189 from, copy))
1176 goto fault; 1190 goto fault;
1177 if ((len -= copy) == 0) 1191 if ((len -= copy) == 0)
1178 return 0; 1192 return 0;
1179 offset += copy; 1193 offset += copy;
1180 from += copy; 1194 from += copy;
1181 } 1195 }
1196 start = end;
1182 } 1197 }
1183 } 1198 }
1184 if (!len) 1199 if (!len)
@@ -1195,8 +1210,8 @@ EXPORT_SYMBOL(skb_store_bits);
1195__wsum skb_checksum(const struct sk_buff *skb, int offset, 1210__wsum skb_checksum(const struct sk_buff *skb, int offset,
1196 int len, __wsum csum) 1211 int len, __wsum csum)
1197{ 1212{
1198 int end = skb_headlen(skb); 1213 int start = skb_headlen(skb);
1199 int i, copy = end - offset; 1214 int i, copy = start - offset;
1200 int pos = 0; 1215 int pos = 0;
1201 1216
1202 /* Checksum header. */ 1217 /* Checksum header. */
@@ -1211,9 +1226,11 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1211 } 1226 }
1212 1227
1213 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1228 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1214 BUG_TRAP(len >= 0); 1229 int end;
1230
1231 BUG_TRAP(start <= offset + len);
1215 1232
1216 end = offset + skb_shinfo(skb)->frags[i].size; 1233 end = start + skb_shinfo(skb)->frags[i].size;
1217 if ((copy = end - offset) > 0) { 1234 if ((copy = end - offset) > 0) {
1218 __wsum csum2; 1235 __wsum csum2;
1219 u8 *vaddr; 1236 u8 *vaddr;
@@ -1222,8 +1239,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1222 if (copy > len) 1239 if (copy > len)
1223 copy = len; 1240 copy = len;
1224 vaddr = kmap_skb_frag(frag); 1241 vaddr = kmap_skb_frag(frag);
1225 csum2 = csum_partial(vaddr + frag->page_offset, 1242 csum2 = csum_partial(vaddr + frag->page_offset +
1226 copy, 0); 1243 offset - start, copy, 0);
1227 kunmap_skb_frag(vaddr); 1244 kunmap_skb_frag(vaddr);
1228 csum = csum_block_add(csum, csum2, pos); 1245 csum = csum_block_add(csum, csum2, pos);
1229 if (!(len -= copy)) 1246 if (!(len -= copy))
@@ -1231,26 +1248,31 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1231 offset += copy; 1248 offset += copy;
1232 pos += copy; 1249 pos += copy;
1233 } 1250 }
1251 start = end;
1234 } 1252 }
1235 1253
1236 if (skb_shinfo(skb)->frag_list) { 1254 if (skb_shinfo(skb)->frag_list) {
1237 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1255 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1238 1256
1239 for (; list; list = list->next) { 1257 for (; list; list = list->next) {
1240 BUG_TRAP(len >= 0); 1258 int end;
1241 1259
1242 end = offset + list->len; 1260 BUG_TRAP(start <= offset + len);
1261
1262 end = start + list->len;
1243 if ((copy = end - offset) > 0) { 1263 if ((copy = end - offset) > 0) {
1244 __wsum csum2; 1264 __wsum csum2;
1245 if (copy > len) 1265 if (copy > len)
1246 copy = len; 1266 copy = len;
1247 csum2 = skb_checksum(list, 0, copy, 0); 1267 csum2 = skb_checksum(list, offset - start,
1268 copy, 0);
1248 csum = csum_block_add(csum, csum2, pos); 1269 csum = csum_block_add(csum, csum2, pos);
1249 if ((len -= copy) == 0) 1270 if ((len -= copy) == 0)
1250 return csum; 1271 return csum;
1251 offset += copy; 1272 offset += copy;
1252 pos += copy; 1273 pos += copy;
1253 } 1274 }
1275 start = end;
1254 } 1276 }
1255 } 1277 }
1256 BUG_ON(len); 1278 BUG_ON(len);
@@ -1263,8 +1285,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1263__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 1285__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1264 u8 *to, int len, __wsum csum) 1286 u8 *to, int len, __wsum csum)
1265{ 1287{
1266 int end = skb_headlen(skb); 1288 int start = skb_headlen(skb);
1267 int i, copy = end - offset; 1289 int i, copy = start - offset;
1268 int pos = 0; 1290 int pos = 0;
1269 1291
1270 /* Copy header. */ 1292 /* Copy header. */
@@ -1281,9 +1303,11 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1281 } 1303 }
1282 1304
1283 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1305 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1284 BUG_TRAP(len >= 0); 1306 int end;
1307
1308 BUG_TRAP(start <= offset + len);
1285 1309
1286 end = offset + skb_shinfo(skb)->frags[i].size; 1310 end = start + skb_shinfo(skb)->frags[i].size;
1287 if ((copy = end - offset) > 0) { 1311 if ((copy = end - offset) > 0) {
1288 __wsum csum2; 1312 __wsum csum2;
1289 u8 *vaddr; 1313 u8 *vaddr;
@@ -1293,8 +1317,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1293 copy = len; 1317 copy = len;
1294 vaddr = kmap_skb_frag(frag); 1318 vaddr = kmap_skb_frag(frag);
1295 csum2 = csum_partial_copy_nocheck(vaddr + 1319 csum2 = csum_partial_copy_nocheck(vaddr +
1296 frag->page_offset, 1320 frag->page_offset +
1297 to, copy, 0); 1321 offset - start, to,
1322 copy, 0);
1298 kunmap_skb_frag(vaddr); 1323 kunmap_skb_frag(vaddr);
1299 csum = csum_block_add(csum, csum2, pos); 1324 csum = csum_block_add(csum, csum2, pos);
1300 if (!(len -= copy)) 1325 if (!(len -= copy))
@@ -1303,6 +1328,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1303 to += copy; 1328 to += copy;
1304 pos += copy; 1329 pos += copy;
1305 } 1330 }
1331 start = end;
1306 } 1332 }
1307 1333
1308 if (skb_shinfo(skb)->frag_list) { 1334 if (skb_shinfo(skb)->frag_list) {
@@ -1310,13 +1336,16 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1310 1336
1311 for (; list; list = list->next) { 1337 for (; list; list = list->next) {
1312 __wsum csum2; 1338 __wsum csum2;
1313 BUG_TRAP(len >= 0); 1339 int end;
1340
1341 BUG_TRAP(start <= offset + len);
1314 1342
1315 end = offset + list->len; 1343 end = start + list->len;
1316 if ((copy = end - offset) > 0) { 1344 if ((copy = end - offset) > 0) {
1317 if (copy > len) 1345 if (copy > len)
1318 copy = len; 1346 copy = len;
1319 csum2 = skb_copy_and_csum_bits(list, 0, 1347 csum2 = skb_copy_and_csum_bits(list,
1348 offset - start,
1320 to, copy, 0); 1349 to, copy, 0);
1321 csum = csum_block_add(csum, csum2, pos); 1350 csum = csum_block_add(csum, csum2, pos);
1322 if ((len -= copy) == 0) 1351 if ((len -= copy) == 0)
@@ -1325,6 +1354,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1325 to += copy; 1354 to += copy;
1326 pos += copy; 1355 pos += copy;
1327 } 1356 }
1357 start = end;
1328 } 1358 }
1329 } 1359 }
1330 BUG_ON(len); 1360 BUG_ON(len);
@@ -1996,8 +2026,8 @@ void __init skb_init(void)
1996int 2026int
1997skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2027skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
1998{ 2028{
1999 int end = skb_headlen(skb); 2029 int start = skb_headlen(skb);
2000 int i, copy = end - offset; 2030 int i, copy = start - offset;
2001 int elt = 0; 2031 int elt = 0;
2002 2032
2003 if (copy > 0) { 2033 if (copy > 0) {
@@ -2013,39 +2043,45 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2013 } 2043 }
2014 2044
2015 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2045 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2016 BUG_TRAP(len >= 0); 2046 int end;
2017 2047
2018 end = offset + skb_shinfo(skb)->frags[i].size; 2048 BUG_TRAP(start <= offset + len);
2049
2050 end = start + skb_shinfo(skb)->frags[i].size;
2019 if ((copy = end - offset) > 0) { 2051 if ((copy = end - offset) > 0) {
2020 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2052 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2021 2053
2022 if (copy > len) 2054 if (copy > len)
2023 copy = len; 2055 copy = len;
2024 sg[elt].page = frag->page; 2056 sg[elt].page = frag->page;
2025 sg[elt].offset = frag->page_offset; 2057 sg[elt].offset = frag->page_offset+offset-start;
2026 sg[elt].length = copy; 2058 sg[elt].length = copy;
2027 elt++; 2059 elt++;
2028 if (!(len -= copy)) 2060 if (!(len -= copy))
2029 return elt; 2061 return elt;
2030 offset += copy; 2062 offset += copy;
2031 } 2063 }
2064 start = end;
2032 } 2065 }
2033 2066
2034 if (skb_shinfo(skb)->frag_list) { 2067 if (skb_shinfo(skb)->frag_list) {
2035 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2068 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2036 2069
2037 for (; list; list = list->next) { 2070 for (; list; list = list->next) {
2038 BUG_TRAP(len >= 0); 2071 int end;
2072
2073 BUG_TRAP(start <= offset + len);
2039 2074
2040 end = offset + list->len; 2075 end = start + list->len;
2041 if ((copy = end - offset) > 0) { 2076 if ((copy = end - offset) > 0) {
2042 if (copy > len) 2077 if (copy > len)
2043 copy = len; 2078 copy = len;
2044 elt += skb_to_sgvec(list, sg+elt, 0, copy); 2079 elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
2045 if ((len -= copy) == 0) 2080 if ((len -= copy) == 0)
2046 return elt; 2081 return elt;
2047 offset += copy; 2082 offset += copy;
2048 } 2083 }
2084 start = end;
2049 } 2085 }
2050 } 2086 }
2051 BUG_ON(len); 2087 BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 89241cdeea3f..0ad1cd57bc39 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
49 struct sk_buff *skb, int offset, struct iovec *to, 49 struct sk_buff *skb, int offset, struct iovec *to,
50 size_t len, struct dma_pinned_list *pinned_list) 50 size_t len, struct dma_pinned_list *pinned_list)
51{ 51{
52 int end = skb_headlen(skb); 52 int start = skb_headlen(skb);
53 int i, copy = end - offset; 53 int i, copy = start - offset;
54 dma_cookie_t cookie = 0; 54 dma_cookie_t cookie = 0;
55 55
56 /* Copy header. */ 56 /* Copy header. */
@@ -69,9 +69,11 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
69 69
70 /* Copy paged appendix. Hmm... why does this look so complicated? */ 70 /* Copy paged appendix. Hmm... why does this look so complicated? */
71 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 71 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
72 BUG_TRAP(len >= 0); 72 int end;
73 73
74 end = offset + skb_shinfo(skb)->frags[i].size; 74 BUG_TRAP(start <= offset + len);
75
76 end = start + skb_shinfo(skb)->frags[i].size;
75 copy = end - offset; 77 copy = end - offset;
76 if ((copy = end - offset) > 0) { 78 if ((copy = end - offset) > 0) {
77 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 79 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -80,8 +82,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
80 if (copy > len) 82 if (copy > len)
81 copy = len; 83 copy = len;
82 84
83 cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, 85 cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
84 page, frag->page_offset, copy); 86 frag->page_offset + offset - start, copy);
85 if (cookie < 0) 87 if (cookie < 0)
86 goto fault; 88 goto fault;
87 len -= copy; 89 len -= copy;
@@ -89,21 +91,25 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
89 goto end; 91 goto end;
90 offset += copy; 92 offset += copy;
91 } 93 }
94 start = end;
92 } 95 }
93 96
94 if (skb_shinfo(skb)->frag_list) { 97 if (skb_shinfo(skb)->frag_list) {
95 struct sk_buff *list = skb_shinfo(skb)->frag_list; 98 struct sk_buff *list = skb_shinfo(skb)->frag_list;
96 99
97 for (; list; list = list->next) { 100 for (; list; list = list->next) {
98 BUG_TRAP(len >= 0); 101 int end;
102
103 BUG_TRAP(start <= offset + len);
99 104
100 end = offset + list->len; 105 end = start + list->len;
101 copy = end - offset; 106 copy = end - offset;
102 if (copy > 0) { 107 if (copy > 0) {
103 if (copy > len) 108 if (copy > len)
104 copy = len; 109 copy = len;
105 cookie = dma_skb_copy_datagram_iovec(chan, list, 110 cookie = dma_skb_copy_datagram_iovec(chan, list,
106 0, to, copy, pinned_list); 111 offset - start, to, copy,
112 pinned_list);
107 if (cookie < 0) 113 if (cookie < 0)
108 goto fault; 114 goto fault;
109 len -= copy; 115 len -= copy;
@@ -111,6 +117,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
111 goto end; 117 goto end;
112 offset += copy; 118 offset += copy;
113 } 119 }
120 start = end;
114 } 121 }
115 } 122 }
116 123