Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58d..768687f1d46b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -66,7 +66,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock	(various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
@@ -86,9 +86,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->anon_vma.lock	(vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock	(anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  *
- *  ->page_table_lock
+ *  ->page_table_lock or pte_lock
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
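
Both hunks above update the lock-ordering documentation for the split page table lock: the relevant lock is now either mm->page_table_lock or a per-page-table pte lock. As a minimal sketch (not part of this diff, assuming the pte_offset_map_lock()/pte_unmap_unlock() helpers that accompany the split-lock scheme), a page table walker takes the per-table lock like this:

static int touch_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (!pte_present(*pte)) {		/* nothing mapped here */
		pte_unmap_unlock(pte, ptl);
		return -EFAULT;
	}
	/* ... inspect or modify *pte while holding the pte lock ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}

touch_pte() is a hypothetical name; the point is only the lock/unlock pairing that replaces a bare spin_lock(&mm->page_table_lock).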
@@ -152,7 +152,7 @@ static int sync_page(void *word)
  * in the ->sync_page() methods make essential use of the
  * page_mapping(), merely passing the page down to the backing
  * device's unplug functions when it's non-NULL, which in turn
- * ignore it for all cases but swap, where only page->private is
+ * ignore it for all cases but swap, where only page_private(page) is
  * of interest. When page_mapping() does go NULL, the entire
  * call stack gracefully ignores the page and returns.
  * -- wli
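
The hunk above switches the comment from the raw page->private field to the page_private(page) accessor. A rough sketch of the accessor idiom as the swap path uses it (illustrative, not taken from this patch):

	swp_entry_t entry;

	set_page_private(page, entry.val);	/* stash the swap entry in the page */
	entry.val = page_private(page);		/* read it back through the accessor */

Routing all accesses through page_private()/set_page_private() keeps the field's users greppable and lets its representation change in one place.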
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t offset, int gfp_mask)
+				pgoff_t offset, gfp_t gfp_mask)
 {
 	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 	if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-		unsigned long index, unsigned int gfp_mask)
+		unsigned long index, gfp_t gfp_mask)
 {
 	struct page *page, *cached_page = NULL;
 	int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
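
The four preceding hunks retype gfp masks from plain int/unsigned int to gfp_t. Under sparse, gfp_t is a __bitwise type, so handing a bare integer to a function expecting a gfp_t (or mixing gfp bits with unrelated flags) draws a static warning, while the generated code is unchanged. A sketch of the idea (the exact typedef lives in the kernel's type headers):

typedef unsigned int __bitwise gfp_t;	/* distinct type under sparse, plain uint otherwise */

struct page *grab_locked(struct address_space *mapping, unsigned long index, gfp_t gfp)
{
	/* the mask flows through with its type checked end to end */
	return find_or_create_page(mapping, index, gfp);
}

grab_locked() is a made-up wrapper; find_or_create_page() is the function retyped in the third hunk above.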
@@ -1520,7 +1520,7 @@ repeat:
 			page_cache_release(page);
 			return err;
 		}
-	} else {
+	} else if (vma->vm_flags & VM_NONLINEAR) {
 		/* No page was found just because we can't read it in now (being
 		 * here implies nonblock != 0), but the page may exist, so set
 		 * the PTE to fault it in later. */
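
The else branch of filemap_populate() now fires only for nonlinear vmas: in a VM_NONLINEAR mapping the file offset cannot be recomputed from the faulting address, so it must be recorded in a file pte now, whereas a linear mapping can simply fault later and derive the offset from the address. The lines following this hunk's context presumably make the guarded call, roughly (a sketch assuming the fremap helper install_file_pte() of that era, not shown in the hunk):

	err = install_file_pte(mm, vma, addr, pgoff, prot);
	if (err)
		return err;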
@@ -1537,6 +1537,7 @@ repeat:
 
 	return 0;
 }
+EXPORT_SYMBOL(filemap_populate);
 
 struct vm_operations_struct generic_file_vm_ops = {
 	.nopage		= filemap_nopage,
@@ -1555,7 +1556,6 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 	vma->vm_ops = &generic_file_vm_ops;
 	return 0;
 }
-EXPORT_SYMBOL(filemap_populate);
 
 /*
  * This is for filesystems which do not implement ->writepage.