aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/skbuff.c
diff options
context:
space:
mode:
authorJarek Poplawski <jarkao2@gmail.com>2009-02-01 03:41:42 -0500
committerDavid S. Miller <davem@davemloft.net>2009-02-01 03:41:42 -0500
commit4fb669948116d928ae44262ab7743732c574630d (patch)
tree0be7481f6339fe4cb32faf4047f15f7cc962babc /net/core/skbuff.c
parent31f4574774e98aa275aeeee94f41ce042285ed8e (diff)
net: Optimize memory usage when splicing from sockets.
The recent fix of data corruption when splicing from sockets uses memory very inefficiently, allocating a new page to copy each chunk of the linear part of the skb. This patch uses the same page until it's (almost) full, by caching the page in the sk_sndmsg_page field.

With changes from David S. Miller <davem@davemloft.net>

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--net/core/skbuff.c47
1 file changed, 36 insertions, 11 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f20e758fe46b..e55d1ef5690d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1333,14 +1333,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1333 put_page(spd->pages[i]); 1333 put_page(spd->pages[i]);
1334} 1334}
1335 1335
1336static inline struct page *linear_to_page(struct page *page, unsigned int len, 1336static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1337 unsigned int offset) 1337 unsigned int *offset,
1338{ 1338 struct sk_buff *skb)
1339 struct page *p = alloc_pages(GFP_KERNEL, 0); 1339{
1340 struct sock *sk = skb->sk;
1341 struct page *p = sk->sk_sndmsg_page;
1342 unsigned int off;
1343
1344 if (!p) {
1345new_page:
1346 p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1347 if (!p)
1348 return NULL;
1340 1349
1341 if (!p) 1350 off = sk->sk_sndmsg_off = 0;
1342 return NULL; 1351 /* hold one ref to this page until it's full */
1343 memcpy(page_address(p) + offset, page_address(page) + offset, len); 1352 } else {
1353 unsigned int mlen;
1354
1355 off = sk->sk_sndmsg_off;
1356 mlen = PAGE_SIZE - off;
1357 if (mlen < 64 && mlen < *len) {
1358 put_page(p);
1359 goto new_page;
1360 }
1361
1362 *len = min_t(unsigned int, *len, mlen);
1363 }
1364
1365 memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1366 sk->sk_sndmsg_off += *len;
1367 *offset = off;
1368 get_page(p);
1344 1369
1345 return p; 1370 return p;
1346} 1371}
@@ -1349,21 +1374,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len,
1349 * Fill page/offset/length into spd, if it can hold more pages. 1374 * Fill page/offset/length into spd, if it can hold more pages.
1350 */ 1375 */
1351static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1376static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1352 unsigned int len, unsigned int offset, 1377 unsigned int *len, unsigned int offset,
1353 struct sk_buff *skb, int linear) 1378 struct sk_buff *skb, int linear)
1354{ 1379{
1355 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1380 if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1356 return 1; 1381 return 1;
1357 1382
1358 if (linear) { 1383 if (linear) {
1359 page = linear_to_page(page, len, offset); 1384 page = linear_to_page(page, len, &offset, skb);
1360 if (!page) 1385 if (!page)
1361 return 1; 1386 return 1;
1362 } else 1387 } else
1363 get_page(page); 1388 get_page(page);
1364 1389
1365 spd->pages[spd->nr_pages] = page; 1390 spd->pages[spd->nr_pages] = page;
1366 spd->partial[spd->nr_pages].len = len; 1391 spd->partial[spd->nr_pages].len = *len;
1367 spd->partial[spd->nr_pages].offset = offset; 1392 spd->partial[spd->nr_pages].offset = offset;
1368 spd->nr_pages++; 1393 spd->nr_pages++;
1369 1394
@@ -1405,7 +1430,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
1405 /* the linear region may spread across several pages */ 1430 /* the linear region may spread across several pages */
1406 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1431 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1407 1432
1408 if (spd_fill_page(spd, page, flen, poff, skb, linear)) 1433 if (spd_fill_page(spd, page, &flen, poff, skb, linear))
1409 return 1; 1434 return 1;
1410 1435
1411 __segment_seek(&page, &poff, &plen, flen); 1436 __segment_seek(&page, &poff, &plen, flen);