Diffstat (limited to 'fs')
-rw-r--r--   fs/splice.c | 33 +++++++++++----------------------
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 27f5e3738a7b..0b202425b0b5 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -78,6 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                 return 1;
         }
 
+        buf->flags |= PIPE_BUF_FLAG_LRU;
         return 0;
 }
 
@@ -85,6 +86,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                         struct pipe_buffer *buf)
 {
         page_cache_release(buf->page);
+        buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
         if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
                 return 1;
 
+        buf->flags |= PIPE_BUF_FLAG_LRU;
         return generic_pipe_buf_steal(pipe, buf);
 }
 
@@ -566,37 +569,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
          */
         if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
                 /*
-                 * If steal succeeds, buf->page is now pruned from the vm
-                 * side (page cache) and we can reuse it. The page will also
-                 * be locked on successful return.
+                 * If steal succeeds, buf->page is now pruned from the
+                 * pagecache and we can reuse it. The page will also be
+                 * locked on successful return.
                  */
                 if (buf->ops->steal(info, buf))
                         goto find_page;
 
                 page = buf->page;
-                page_cache_get(page);
-
-                /*
-                 * page must be on the LRU for adding to the pagecache.
-                 * Check this without grabbing the zone lock, if it isn't
-                 * the do grab the zone lock, recheck, and add if necessary.
-                 */
-                if (!PageLRU(page)) {
-                        struct zone *zone = page_zone(page);
-
-                        spin_lock_irq(&zone->lru_lock);
-                        if (!PageLRU(page)) {
-                                SetPageLRU(page);
-                                add_page_to_inactive_list(zone, page);
-                        }
-                        spin_unlock_irq(&zone->lru_lock);
-                }
-
                 if (add_to_page_cache(page, mapping, index, gfp_mask)) {
-                        page_cache_release(page);
                         unlock_page(page);
                         goto find_page;
                 }
+
+                page_cache_get(page);
+
+                if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+                        lru_cache_add(page);
         } else {
 find_page:
                 page = find_lock_page(mapping, index);
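
Taken together, the hunks move the LRU bookkeeping out of pipe_to_file(): the steal callbacks mark pages that are already on the LRU with PIPE_BUF_FLAG_LRU, the release callback clears the flag, and pipe_to_file() only calls lru_cache_add() for pages that lack it. The following is a condensed sketch of the resulting SPLICE_F_MOVE fast path, not the literal kernel source; the helper name move_stolen_page_to_file() and the -EAGAIN return (standing in for the goto find_page fallback) are illustrative.

/*
 * Condensed sketch of the post-patch SPLICE_F_MOVE fast path in
 * pipe_to_file().  The helper name and the -EAGAIN return (the caller
 * would fall back to the find_page path) are illustrative, not taken
 * from fs/splice.c.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>      /* add_to_page_cache(), page_cache_get() */
#include <linux/pipe_fs_i.h>    /* struct pipe_buffer, PIPE_BUF_FLAG_LRU */
#include <linux/swap.h>         /* lru_cache_add() */

static int move_stolen_page_to_file(struct pipe_inode_info *info,
                                    struct pipe_buffer *buf,
                                    struct address_space *mapping,
                                    pgoff_t index, gfp_t gfp_mask)
{
        struct page *page;

        /* ->steal() now tags pages that are already on the LRU */
        if (buf->ops->steal(info, buf))
                return -EAGAIN;

        /* on successful steal the page is locked and off its old mapping */
        page = buf->page;
        if (add_to_page_cache(page, mapping, index, gfp_mask)) {
                unlock_page(page);
                return -EAGAIN;
        }

        page_cache_get(page);

        /* only add the page to the LRU if the steal callback did not mark it */
        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add(page);

        return 0;
}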