Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	85
1 file changed, 42 insertions(+), 43 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 65eac7739033..da74b844f4ea 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	kfree_skb(skb);
+	put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
 			      struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	skb_get(skb);
+	get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -1334,9 +1330,19 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-	struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+	put_page(spd->pages[i]);
+}
 
-	kfree_skb(skb);
+static inline struct page *linear_to_page(struct page *page, unsigned int len,
+					  unsigned int offset)
+{
+	struct page *p = alloc_pages(GFP_KERNEL, 0);
+
+	if (!p)
+		return NULL;
+	memcpy(page_address(p) + offset, page_address(page) + offset, len);
+
+	return p;
 }
 
 /*
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
 				unsigned int len, unsigned int offset,
-				struct sk_buff *skb)
+				struct sk_buff *skb, int linear)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
+	if (linear) {
+		page = linear_to_page(page, len, offset);
+		if (!page)
+			return 1;
+	} else
+		get_page(page);
+
 	spd->pages[spd->nr_pages] = page;
 	spd->partial[spd->nr_pages].len = len;
 	spd->partial[spd->nr_pages].offset = offset;
-	spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
 	spd->nr_pages++;
+
 	return 0;
 }
 
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd)
+				   struct splice_pipe_desc *spd, int linear)
 {
 	if (!*len)
 		return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, flen, poff, skb))
+		if (spd_fill_page(spd, page, flen, poff, skb, linear))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd))
+			     offset, len, skb, spd, 1))
 		return 1;
 
 	/*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd))
+				     offset, len, skb, spd, 0))
 			return 1;
 	}
 
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
-	struct sk_buff *skb;
-
-	/*
-	 * I'd love to avoid the clone here, but tcp_read_sock()
-	 * ignores reference counts and unconditonally kills the sk_buff
-	 * on return from the actor.
-	 */
-	skb = skb_clone(__skb, GFP_KERNEL);
-	if (unlikely(!skb))
-		return -ENOMEM;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 	}
 
 done:
-	/*
-	 * drop our reference to the clone, the pipe consumption will
-	 * drop the rest.
-	 */
-	kfree_skb(skb);
-
 	if (spd.nr_pages) {
+		struct sock *sk = skb->sk;
 		int ret;
-		struct sock *sk = __skb->sk;
 
 		/*
 		 * Drop the socket lock, otherwise we have reverse
@@ -2215,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 		return 0;
 
 next_skb:
-	block_limit = skb_headlen(st->cur_skb);
+	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
 	if (abs_offset < block_limit) {
-		*data = st->cur_skb->data + abs_offset;
+		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
 
@@ -2253,13 +2250,14 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->cur_skb->next) {
-		st->cur_skb = st->cur_skb->next;
+	if (st->root_skb == st->cur_skb &&
+	    skb_shinfo(st->root_skb)->frag_list) {
+		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
-	} else if (st->root_skb == st->cur_skb &&
-		   skb_shinfo(st->root_skb)->frag_list) {
-		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+	} else if (st->cur_skb->next) {
+		st->cur_skb = st->cur_skb->next;
+		st->frag_idx = 0;
 		goto next_skb;
 	}
 
@@ -2588,8 +2586,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct sk_buff *nskb;
 	unsigned int headroom;
 	unsigned int hlen = p->data - skb_mac_header(p);
+	unsigned int len = skb->len;
 
-	if (hlen + p->len + skb->len >= 65536)
+	if (hlen + p->len + len >= 65536)
 		return -E2BIG;
 
 	if (skb_shinfo(p)->frag_list)
@@ -2651,9 +2650,9 @@ merge:
 
 done:
 	NAPI_GRO_CB(p)->count++;
-	p->data_len += skb->len;
-	p->truesize += skb->len;
-	p->len += skb->len;
+	p->data_len += len;
+	p->truesize += len;
+	p->len += len;
 
 	NAPI_GRO_CB(skb)->same_flow = 1;
 	return 0;
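
The net effect of the splice hunks above is that a pipe buffer now pins a page rather than a clone of the whole sk_buff: spd_fill_page() copies the linear area into a freshly allocated page via linear_to_page(), takes an extra page reference for paged fragments, and sock_pipe_buf_release()/sock_spd_release() drop that reference with put_page(). The toy userspace program below is only a sketch of that ownership pattern; page_t, get_page_t(), put_page_t() and linear_to_page_t() are invented stand-ins, not kernel APIs.

/* Toy model of the page-refcounting scheme introduced by this patch.
 * Compile with: cc -Wall refmodel.c && ./a.out
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {		/* stand-in for struct page */
	int refcount;
	char data[4096];
} page_t;

static page_t *alloc_page_t(void)
{
	page_t *p = calloc(1, sizeof(*p));

	if (p)
		p->refcount = 1;
	return p;
}

static void get_page_t(page_t *p) { p->refcount++; }

static void put_page_t(page_t *p)
{
	if (--p->refcount == 0)
		free(p);
}

/* Like linear_to_page(): copy 'len' bytes at 'offset' into a fresh page,
 * so the consumer no longer needs the source skb kept alive. */
static page_t *linear_to_page_t(const page_t *src, size_t len, size_t offset)
{
	page_t *p = alloc_page_t();

	if (!p)
		return NULL;
	memcpy(p->data + offset, src->data + offset, len);
	return p;
}

int main(void)
{
	page_t *skb_head = alloc_page_t();	/* models the skb->data page */
	page_t *frag = alloc_page_t();		/* models a paged fragment */

	if (!skb_head || !frag)
		return 1;
	strcpy(skb_head->data, "payload");

	/* Linear region: spd_fill_page(..., linear=1) copies it, so the
	 * pipe owns the copy and the skb can be freed immediately. */
	page_t *spliced = linear_to_page_t(skb_head, 8, 0);
	if (!spliced)
		return 1;
	put_page_t(skb_head);			/* skb side lets go */

	/* Paged fragment: spd_fill_page(..., linear=0) only takes a
	 * reference, no copy. */
	get_page_t(frag);			/* pipe buffer's reference */
	put_page_t(frag);			/* skb's own reference dropped */

	printf("pipe still sees: %s\n", spliced->data);
	put_page_t(spliced);			/* sock_pipe_buf_release() analogue */
	put_page_t(frag);			/* ditto for the fragment page */
	return 0;
}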