author     Jens Axboe <axboe@suse.de>                2006-05-03 04:35:26 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2006-05-04 00:55:12 -0400
commit     1432873af7ae29d4bb3c56114c05b539d078ca62 (patch)
tree       cf4f72608d2e10f7ff786b9d60067963f1ab4ca9 /fs/splice.c
parent     bfc4ee39fdbb2deb8864785d5e5bc5cdd3b31a69 (diff)
[PATCH] splice: LRU fixups
Nick says that the current construct isn't safe. This goes back to the
original, but sets PIPE_BUF_FLAG_LRU on user pages as well, since they all
seem to be on the LRU in the first place.

Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'fs/splice.c')
-rw-r--r--  fs/splice.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
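
Before the diff itself, a sketch of the bookkeeping the patch moves to: the
steal hooks record, via PIPE_BUF_FLAG_LRU, that the stolen page is already on
an LRU list, and pipe_to_file() only adds a page to the LRU when that flag is
absent, rather than open-coding SetPageLRU under the zone lock. The standalone
C program below is only a toy model of that flag dance; struct buffer,
BUF_FLAG_LRU, pagecache_steal, anon_steal and insert_stolen_page are made-up
names, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for PIPE_BUF_FLAG_LRU; struct buffer is a toy, not struct pipe_buffer. */
#define BUF_FLAG_LRU	0x01

struct buffer {
	unsigned int flags;
	bool page_on_lru;	/* models PageLRU() on the underlying page */
};

/*
 * Producer side, page-cache flavour (hunks 1 and 3 of the diff): the page
 * handed over is already on the LRU, so remember that in the buffer flags.
 * Returning 0 means the steal succeeded.
 */
static int pagecache_steal(struct buffer *buf)
{
	buf->page_on_lru = true;	/* page-cache pages already sit on the LRU */
	buf->flags |= BUF_FLAG_LRU;
	return 0;
}

/*
 * Producer side, anonymous flavour: a freshly allocated pipe page that has
 * never been on the LRU; its steal hook leaves the flag clear.
 */
static int anon_steal(struct buffer *buf)
{
	(void)buf;			/* nothing to record for this flavour */
	return 0;
}

/*
 * Consumer side (the pipe_to_file() hunk): once the stolen page has been
 * inserted at the destination, put it on the LRU only if the producer did
 * not mark it as being there already; this models the lru_cache_add() call.
 */
static void insert_stolen_page(struct buffer *buf)
{
	if (!(buf->flags & BUF_FLAG_LRU))
		buf->page_on_lru = true;
}

int main(void)
{
	struct buffer from_cache = { 0 }, from_pipe = { 0 };

	pagecache_steal(&from_cache);
	anon_steal(&from_pipe);

	insert_stolen_page(&from_cache);
	insert_stolen_page(&from_pipe);

	printf("page-cache page on LRU: %d\n", from_cache.page_on_lru);
	printf("anonymous page on LRU:  %d\n", from_pipe.page_on_lru);
	return 0;
}

In the real patch, the "already on the LRU" mark is set by
page_cache_pipe_buf_steal() and user_page_pipe_buf_steal(), and the
conditional add corresponds to the lru_cache_add() call added to
pipe_to_file().
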
diff --git a/fs/splice.c b/fs/splice.c
index 27f5e3738a7b..0b202425b0b5 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,6 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -85,6 +86,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 					struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
 		return 1;
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return generic_pipe_buf_steal(pipe, buf);
 }
 
@@ -566,37 +569,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	 */
 	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
-		 * If steal succeeds, buf->page is now pruned from the vm
-		 * side (page cache) and we can reuse it. The page will also
-		 * be locked on successful return.
+		 * If steal succeeds, buf->page is now pruned from the
+		 * pagecache and we can reuse it. The page will also be
+		 * locked on successful return.
 		 */
 		if (buf->ops->steal(info, buf))
 			goto find_page;
 
 		page = buf->page;
-		page_cache_get(page);
-
-		/*
-		 * page must be on the LRU for adding to the pagecache.
-		 * Check this without grabbing the zone lock, if it isn't
-		 * the do grab the zone lock, recheck, and add if necessary.
-		 */
-		if (!PageLRU(page)) {
-			struct zone *zone = page_zone(page);
-
-			spin_lock_irq(&zone->lru_lock);
-			if (!PageLRU(page)) {
-				SetPageLRU(page);
-				add_page_to_inactive_list(zone, page);
-			}
-			spin_unlock_irq(&zone->lru_lock);
-		}
-
 		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
-			page_cache_release(page);
 			unlock_page(page);
 			goto find_page;
 		}
+
+		page_cache_get(page);
+
+		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+			lru_cache_add(page);
 	} else {
 find_page:
 		page = find_lock_page(mapping, index);
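
Taken together: the steal hooks for page-cache and gifted user pages mark the
buffer with PIPE_BUF_FLAG_LRU, since those pages already sit on an LRU list,
and the release hook clears the flag so a reused buffer slot does not carry it
forward. On the consumer side, pipe_to_file() drops the open-coded
SetPageLRU/add_page_to_inactive_list sequence under zone->lru_lock (the
construct Nick objected to) and instead calls lru_cache_add() only for pages
the producer did not mark as LRU-resident.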