Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  46
1 file changed, 33 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 93595c327bbd..d5fdae2eb183 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -123,8 +123,7 @@ void remove_from_page_cache(struct page *page)
 {
         struct address_space *mapping = page->mapping;
 
-        if (unlikely(!PageLocked(page)))
-                PAGE_BUG(page);
+        BUG_ON(!PageLocked(page));
 
         write_lock_irq(&mapping->tree_lock);
         __remove_from_page_cache(page);
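The BUG_ON() form encodes the same caller contract as the old open-coded
check: remove_from_page_cache() may only be called with the page locked.
A hypothetical caller sketch, not part of this patch, assuming the era's
lock_page()/unlock_page() and page_cache_release() helpers:

        /* Hypothetical caller: the page lock must be held across removal. */
        lock_page(page);                /* take PG_locked */
        remove_from_page_cache(page);   /* BUG_ON(!PageLocked(page)) holds */
        unlock_page(page);
        page_cache_release(page);       /* drop this caller's reference */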
@@ -139,7 +138,25 @@ static int sync_page(void *word)
         page = container_of((page_flags_t *)word, struct page, flags);
 
         /*
-         * FIXME, fercrissake. What is this barrier here for?
+         * page_mapping() is being called without PG_locked held.
+         * Some knowledge of the state and use of the page is used to
+         * reduce the requirements down to a memory barrier.
+         * The danger here is of a stale page_mapping() return value
+         * indicating a struct address_space different from the one it's
+         * associated with when it is associated with one.
+         * After smp_mb(), it's either the correct page_mapping() for
+         * the page, or an old page_mapping() and the page's own
+         * page_mapping() has gone NULL.
+         * The ->sync_page() address_space operation must tolerate
+         * page_mapping() going NULL. By an amazing coincidence,
+         * this comes about because none of the users of the page
+         * in the ->sync_page() methods make essential use of the
+         * page_mapping(), merely passing the page down to the backing
+         * device's unplug functions when it's non-NULL, which in turn
+         * ignore it for all cases but swap, where only page->private is
+         * of interest. When page_mapping() does go NULL, the entire
+         * call stack gracefully ignores the page and returns.
+         * -- wli
          */
         smp_mb();
         mapping = page_mapping(page);
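The contract the new comment documents is easiest to see from the consumer
side. A sketch of a ->sync_page() method, modeled loosely on the era's
block_sync_page(); the name example_sync_page and the int return are
illustrative, and blk_run_backing_dev() is assumed to be the usual unplug
helper:

        /* Sketch: a ->sync_page() method must tolerate page_mapping()
         * going NULL and simply do nothing in that case. */
        static int example_sync_page(struct page *page)
        {
                struct address_space *mapping;

                smp_mb();                     /* order against mapping teardown */
                mapping = page_mapping(page); /* may be NULL by now */
                if (mapping)                  /* hand the page to the unplug path */
                        blk_run_backing_dev(mapping->backing_dev_info, page);
                return 0;                     /* NULL: gracefully ignore the page */
        }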
@@ -152,9 +169,10 @@ static int sync_page(void *word)
 /**
  * filemap_fdatawrite_range - start writeback against all of a mapping's
  *      dirty pages that lie within the byte offsets <start, end>
  * @mapping:    address space structure to write
  * @start:      offset in bytes where the range starts
- * @end :       offset in bytes where the range ends
+ * @end:        offset in bytes where the range ends
+ * @sync_mode:  enable synchronous operation
  *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
  * opposed to a regular memory * cleansing writeback.  The difference between
@@ -518,8 +536,8 @@ EXPORT_SYMBOL(find_trylock_page);
 /**
  * find_lock_page - locate, pin and lock a pagecache page
  *
- * @mapping - the address_space to search
- * @offset - the page index
+ * @mapping: the address_space to search
+ * @offset: the page index
  *
  * Locates the desired pagecache page, locks it, increments its reference
  * count and returns its address.
@@ -558,9 +576,9 @@ EXPORT_SYMBOL(find_lock_page);
 /**
  * find_or_create_page - locate or add a pagecache page
  *
- * @mapping - the page's address_space
- * @index - the page's index into the mapping
- * @gfp_mask - page allocation mode
+ * @mapping: the page's address_space
+ * @index: the page's index into the mapping
+ * @gfp_mask: page allocation mode
  *
  * Locates a page in the pagecache.  If the page is not present, a new page
  * is allocated using @gfp_mask and is added to the pagecache and to the VM's
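The three kernel-doc hunks above are all the same fix: scripts/kernel-doc
only recognizes parameter lines of the form "@name:", so "@name -" and
"@end :" silently lose the description. The expected form, shown on a
hypothetical function:

        /**
         * example_find_page - locate a pagecache page (illustrative only)
         * @mapping: the address_space to search
         * @offset: the page index
         *
         * Each parameter line must read "@name:" with no space before the
         * colon, or kernel-doc fails to attach the description.
         */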
@@ -1949,7 +1967,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                 buf = iov->iov_base + written;
         else {
                 filemap_set_next_iovec(&cur_iov, &iov_base, written);
-                buf = iov->iov_base + iov_base;
+                buf = cur_iov->iov_base + iov_base;
         }
 
         do {
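The bug here: filemap_set_next_iovec() advances the (cur_iov, iov_base)
cursor past the bytes already written, but the old code rebuilt buf from
the original iov, which may by then be one or more segments behind. An
illustrative reimplementation of the advance semantics assumed (the real
helper lives elsewhere in filemap.c):

        /* Illustrative reimplementation: step (*iovp, *basep) forward by
         * `bytes`, moving to the next segment whenever the current one
         * is exhausted. */
        static void example_set_next_iovec(const struct iovec **iovp,
                                           size_t *basep, size_t bytes)
        {
                const struct iovec *iov = *iovp;
                size_t base = *basep;

                while (bytes) {
                        size_t copy = iov->iov_len - base;

                        if (copy > bytes)
                                copy = bytes;
                        bytes -= copy;
                        base += copy;
                        if (base == iov->iov_len) {   /* segment exhausted */
                                iov++;
                                base = 0;
                        }
                }
                *iovp = iov;
                *basep = base;
        }

After such an advance, the only user pointer consistent with the cursor is
cur_iov->iov_base + iov_base, which is exactly what the fix computes.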
@@ -2007,9 +2025,11 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                                 count -= status;
                                 pos += status;
                                 buf += status;
-                                if (unlikely(nr_segs > 1))
+                                if (unlikely(nr_segs > 1)) {
                                         filemap_set_next_iovec(&cur_iov,
                                                         &iov_base, status);
+                                        buf = cur_iov->iov_base + iov_base;
+                                }
                         }
                 }
                 if (unlikely(copied != bytes))
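The second hunk applies the same rebase inside the copy loop: buf += status
is valid only while the copy stays within a single segment, so once a short
copy can push the cursor across a segment boundary, buf must be recomputed.
Annotated (comments added for illustration):

        count -= status;
        pos += status;
        buf += status;          /* sufficient for the single-segment case */
        if (unlikely(nr_segs > 1)) {
                filemap_set_next_iovec(&cur_iov, &iov_base, status);
                /* the cursor may have crossed a segment boundary;
                 * rebase buf to match (cur_iov, iov_base) */
                buf = cur_iov->iov_base + iov_base;
        }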