author		Ingo Molnar <mingo@elte.hu>	2009-01-10 21:43:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-10 21:43:52 -0500
commit		99cd7074891f87c49660e3b2880564324a4733ac (patch)
tree		903d2665bcb445f1f265d1adf7a99f265bcefc15 /mm/filemap.c
parent		e8a9cbf6ae620d9e5ba9cb42001c033287a284a3 (diff)
parent		c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into tracing/urgent
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index f5769b4dc075..ceba0bd03662 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -210,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	int ret;
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
-		.nr_to_write = mapping->nrpages * 2,
+		.nr_to_write = LONG_MAX,
 		.range_start = start,
 		.range_end = end,
 	};
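The new nr_to_write value matters because writeback could previously give up after touching nrpages * 2 pages, leaving part of the requested range dirty. For context, a minimal sketch of the whole function after this hunk, reconstructed from 2.6.29-era sources rather than quoted verbatim from this tree:

int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode   = sync_mode,
		/* LONG_MAX: flush every dirty page in [start, end] instead
		 * of stopping once nrpages * 2 pages have been written */
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end   = end,
	};

	/* Assumed from the surrounding code of this era: skip mappings
	 * that cannot write back dirty pages at all. */
	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	return do_writepages(mapping, &wbc);
}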
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
 
@@ -741,7 +741,14 @@ repeat:
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			return NULL;
-		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+		/*
+		 * We want a regular kernel memory (not highmem or DMA etc)
+		 * allocation for the radix tree nodes, but we need to honour
+		 * the context-specific requirements the caller has asked for.
+		 * GFP_RECLAIM_MASK collects those requirements.
+		 */
+		err = add_to_page_cache_lru(page, mapping, index,
+						(gfp_mask & GFP_RECLAIM_MASK));
 		if (unlikely(err)) {
 			page_cache_release(page);
 			page = NULL;
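A hedged illustration of what the new mask does; flag names are the 2.6.29-era ones, and the exact contents of GFP_RECLAIM_MASK (defined in mm/internal.h) vary between kernel versions:

	/*
	 * Sketch only. A filesystem caller typically passes something
	 * like GFP_NOFS | __GFP_HIGHMEM: the pagecache page itself may
	 * live in highmem, but the radix tree node must not.
	 */
	gfp_t caller_mask = GFP_NOFS | __GFP_HIGHMEM;
	gfp_t node_mask   = caller_mask & GFP_RECLAIM_MASK;
	/*
	 * node_mask keeps the reclaim-context bits (__GFP_WAIT and
	 * __GFP_IO here; __GFP_FS stays cleared because the caller
	 * cleared it) and drops the zone placement bit __GFP_HIGHMEM,
	 * so the node comes from regular kernel memory while still
	 * honouring the caller's reclaim constraints.
	 */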
@@ -950,7 +957,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 		return NULL;
 	}
 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
+	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
 		page_cache_release(page);
 		page = NULL;
 	}
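The page itself was allocated with __GFP_FS cleared, so inserting it with GFP_KERNEL quietly re-enabled filesystem recursion for the radix tree node allocation. With the 2.6.29-era flag definitions the replacement is exactly the missing mask, a relationship one can sanity-check like this (illustrative, not from this commit):

	/* GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS
	 * GFP_NOFS   = __GFP_WAIT | __GFP_IO
	 * so GFP_NOFS matches the ~__GFP_FS allocation above. */
	BUILD_BUG_ON(GFP_NOFS != (GFP_KERNEL & ~__GFP_FS));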
@@ -1317,7 +1324,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			goto out; /* skip atime */
 		size = i_size_read(inode);
 		if (pos < size) {
-			retval = filemap_write_and_wait(mapping);
+			retval = filemap_write_and_wait_range(mapping, pos,
+					pos + iov_length(iov, nr_segs) - 1);
 			if (!retval) {
 				retval = mapping->a_ops->direct_IO(READ, iocb,
 							iov, pos, nr_segs);
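Both endpoints of filemap_write_and_wait_range() are inclusive, so the last byte a direct read can touch is pos + iov_length(iov, nr_segs) - 1. A hedged sketch of why this beats flushing the whole mapping:

	/*
	 * Illustrative only: with a 2-segment iovec of 512 and 1536
	 * bytes at pos = 4096, only bytes [4096, 6143] are flushed and
	 * waited on. Dirty pages elsewhere in the file no longer delay
	 * this read.
	 */
	loff_t first = pos;					/* 4096 */
	loff_t last  = pos + iov_length(iov, nr_segs) - 1;	/* 6143 */
	retval = filemap_write_and_wait_range(mapping, first, last);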
@@ -1530,7 +1538,6 @@ retry_find:
 	/*
 	 * Found the page and have a reference on it.
 	 */
-	mark_page_accessed(page);
 	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
@@ -2060,18 +2067,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	if (count != ocount)
 		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-	/*
-	 * Unmap all mmappings of the file up-front.
-	 *
-	 * This will cause any pte dirty bits to be propagated into the
-	 * pageframes for the subsequent filemap_write_and_wait().
-	 */
 	write_len = iov_length(iov, *nr_segs);
 	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
-	if (mapping_mapped(mapping))
-		unmap_mapping_range(mapping, pos, write_len, 0);
 
-	written = filemap_write_and_wait(mapping);
+	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
 	if (written)
 		goto out;
 
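Note that end is still computed even though the unmap is gone: a few lines below this hunk the function uses it to invalidate cached pages over the write range. Roughly, assuming the 2.6.29-era code that follows:

	/* Sketch of the code just below this hunk: after flushing
	 * [pos, pos + write_len - 1], drop clean cached pages over the
	 * same range so buffered readers see the directly written data. */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		if (written)
			goto out;
	}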
@@ -2291,7 +2290,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 	 * the file data here, to try to honour O_DIRECT expectations.
 	 */
 	if (unlikely(file->f_flags & O_DIRECT) && written)
-		status = filemap_write_and_wait(mapping);
+		status = filemap_write_and_wait_range(mapping,
+							pos, pos + written - 1);
 
 	return written ? written : status;
 }