Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 51
1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 93595c327bbd..47263ac3e4ea 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,11 +29,6 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 /*
- * This is needed for the following functions:
- *  - try_to_release_page
- *  - block_invalidatepage
- *  - generic_osync_inode
- *
  * FIXME: remove all knowledge of the buffer layer from the core VM
  */
 #include <linux/buffer_head.h> /* for generic_osync_inode */
@@ -123,8 +118,7 @@ void remove_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
-	if (unlikely(!PageLocked(page)))
-		PAGE_BUG(page);
+	BUG_ON(!PageLocked(page));
 
 	write_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
@@ -139,7 +133,25 @@ static int sync_page(void *word)
 	page = container_of((page_flags_t *)word, struct page, flags);
 
 	/*
-	 * FIXME, fercrissake.  What is this barrier here for?
+	 * page_mapping() is being called without PG_locked held.
+	 * Some knowledge of the state and use of the page is used to
+	 * reduce the requirements down to a memory barrier.
+	 * The danger here is of a stale page_mapping() return value
+	 * indicating a struct address_space different from the one it's
+	 * associated with when it is associated with one.
+	 * After smp_mb(), it's either the correct page_mapping() for
+	 * the page, or an old page_mapping() and the page's own
+	 * page_mapping() has gone NULL.
+	 * The ->sync_page() address_space operation must tolerate
+	 * page_mapping() going NULL. By an amazing coincidence,
+	 * this comes about because none of the users of the page
+	 * in the ->sync_page() methods make essential use of the
+	 * page_mapping(), merely passing the page down to the backing
+	 * device's unplug functions when it's non-NULL, which in turn
+	 * ignore it for all cases but swap, where only page->private is
+	 * of interest. When page_mapping() does go NULL, the entire
+	 * call stack gracefully ignores the page and returns.
+	 *  -- wli
 	 */
 	smp_mb();
 	mapping = page_mapping(page);
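
Editor's note: the comment introduced above pins down a contract for ->sync_page()
implementations: after the barrier, page_mapping() may legitimately return NULL, and
the callee must simply back off. The sketch below illustrates that contract; it is
modelled only loosely on the buffer-layer method of this era, the prototype is
illustrative, and run_backing_device_unplug() is a hypothetical placeholder, not a
real kernel function.

/*
 * Hedged sketch only: a ->sync_page() method that tolerates page_mapping()
 * going NULL, as the comment above requires.  run_backing_device_unplug()
 * is a hypothetical stand-in for the backing device's unplug hook.
 */
static int example_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();			/* see the ordering comment in sync_page() */
	mapping = page_mapping(page);
	if (mapping)			/* may have gone NULL; just back off */
		run_backing_device_unplug(mapping, page);
	return 0;
}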
@@ -152,9 +164,10 @@ static int sync_page(void *word)
 /**
  * filemap_fdatawrite_range - start writeback against all of a mapping's
  * dirty pages that lie within the byte offsets <start, end>
  * @mapping: address space structure to write
  * @start: offset in bytes where the range starts
- * @end : offset in bytes where the range ends
+ * @end: offset in bytes where the range ends
+ * @sync_mode: enable synchronous operation
  *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
  * opposed to a regular memory cleansing writeback.  The difference between
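
Editor's note: the new @sync_mode line documents the knob the surrounding text
explains: WB_SYNC_ALL makes this a data-integrity writeback that waits on dirty
pages instead of skipping them. A rough sketch of what a range-writeback helper of
this era does with that argument follows; the writeback_control field names
(.start/.end in particular) are an assumption about the 2.6-era layout, not a quote
of the real mm/filemap.c function.

/*
 * Sketch under assumptions: 2.6-era struct writeback_control with
 * .start/.end range fields.  Not the actual implementation.
 */
static int sketch_fdatawrite_range(struct address_space *mapping,
				   loff_t start, loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode	= sync_mode,	/* WB_SYNC_ALL: wait, don't skip */
		.nr_to_write	= mapping->nrpages * 2,
		.start		= start,
		.end		= end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;
	return do_writepages(mapping, &wbc);
}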
@@ -518,8 +531,8 @@ EXPORT_SYMBOL(find_trylock_page);
 /**
  * find_lock_page - locate, pin and lock a pagecache page
  *
- * @mapping - the address_space to search
- * @offset - the page index
+ * @mapping: the address_space to search
+ * @offset: the page index
  *
  * Locates the desired pagecache page, locks it, increments its reference
  * count and returns its address.
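
Editor's note: beyond the kerneldoc markup fix, the comment states the caller-side
contract: the page comes back both referenced and locked. A hedged usage sketch
follows (hypothetical caller, not code from this file).

/*
 * Hypothetical caller illustrating the contract described above:
 * find_lock_page() returns a locked, referenced page or NULL, and the
 * caller must drop both the lock and the reference.
 */
static void example_with_cached_page(struct address_space *mapping,
				     unsigned long index)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page)
		return;			/* not present in the pagecache */

	/* ... examine or modify the page while it is locked ... */

	unlock_page(page);		/* drop the lock taken on our behalf */
	page_cache_release(page);	/* drop the reference taken on our behalf */
}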
@@ -558,9 +571,9 @@ EXPORT_SYMBOL(find_lock_page);
 /**
  * find_or_create_page - locate or add a pagecache page
  *
- * @mapping - the page's address_space
- * @index - the page's index into the mapping
- * @gfp_mask - page allocation mode
+ * @mapping: the page's address_space
+ * @index: the page's index into the mapping
+ * @gfp_mask: page allocation mode
  *
  * Locates a page in the pagecache.  If the page is not present, a new page
  * is allocated using @gfp_mask and is added to the pagecache and to the VM's
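
Editor's note: find_or_create_page() differs from find_lock_page() above in that a
miss allocates a fresh page with @gfp_mask and inserts it into the pagecache and the
LRU. A hedged usage sketch follows; using mapping_gfp_mask() for the allocation mode
is an assumption about typical usage, not a requirement of the interface.

/*
 * Hypothetical caller: look up @index, allocating and inserting a new page
 * on a miss.
 */
static struct page *example_get_or_make_page(struct address_space *mapping,
					     unsigned long index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;		/* allocation or insertion failed */

	/* Page is returned locked and referenced, as with find_lock_page(). */
	return page;
}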
@@ -1949,7 +1962,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		buf = iov->iov_base + written;
 	else {
 		filemap_set_next_iovec(&cur_iov, &iov_base, written);
-		buf = iov->iov_base + iov_base;
+		buf = cur_iov->iov_base + iov_base;
 	}
 
 	do {
@@ -2007,9 +2020,11 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 				count -= status;
 				pos += status;
 				buf += status;
-				if (unlikely(nr_segs > 1))
+				if (unlikely(nr_segs > 1)) {
 					filemap_set_next_iovec(&cur_iov,
 							&iov_base, status);
+					buf = cur_iov->iov_base + iov_base;
+				}
 			}
 		}
 		if (unlikely(copied != bytes))
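
Editor's note: the last two hunks are the substantive bug fix. Once
filemap_set_next_iovec() has advanced cur_iov and iov_base past the bytes already
copied, the write pointer must be rebuilt from the current segment
(cur_iov->iov_base + iov_base); the old code rebuilt it from the original iov, so
multi-segment (nr_segs > 1) writes could continue copying from the wrong segment
after a partial copy. The self-contained userspace sketch below walks the same
arithmetic on a plain struct iovec array; it is an analogy only, not kernel code.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/*
 * Consume `copied` bytes from a multi-segment iovec: advance the current
 * segment and intra-segment offset, then recompute the data pointer from
 * the CURRENT segment -- the expression the patch fixes.
 */
static const char *advance_iovec(const struct iovec **cur_iov,
				 size_t *iov_off, size_t copied)
{
	const struct iovec *iov = *cur_iov;
	size_t off = *iov_off + copied;

	/* Skip over any segments that the copy fully consumed. */
	while (off >= iov->iov_len) {
		off -= iov->iov_len;
		iov++;
	}
	*cur_iov = iov;
	*iov_off = off;
	return (const char *)iov->iov_base + off;
}

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = "hello ", .iov_len = 6 },
		{ .iov_base = "world",  .iov_len = 5 },
	};
	const struct iovec *cur = iov;
	size_t off = 0;
	const char *buf;

	/* 8 bytes copied: 6 from segment 0, 2 from segment 1. */
	buf = advance_iovec(&cur, &off, 8);
	printf("next byte to copy: '%c'\n", *buf);	/* prints 'r' */
	return 0;
}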