Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 83
1 file changed, 61 insertions, 22 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index c641edf553a9..bcdc393b6580 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -34,6 +34,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 /*
@@ -58,16 +59,16 @@
 /*
  * Lock ordering:
  *
- *  ->i_mmap_lock        (truncate_pagecache)
+ *  ->i_mmap_mutex       (truncate_pagecache)
  *    ->private_lock     (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock      (exclusive_swap_page, others)
  *        ->mapping->tree_lock
  *
  *  ->i_mutex
- *    ->i_mmap_lock      (truncate->unmap_mapping_range)
+ *    ->i_mmap_mutex     (truncate->unmap_mapping_range)
  *
  *  ->mmap_sem
- *    ->i_mmap_lock
+ *    ->i_mmap_mutex
  *      ->page_table_lock or pte_lock  (various, mainly in memory.c)
  *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
  *
@@ -84,7 +85,7 @@
  *    sb_lock            (fs/fs-writeback.c)
  *    ->mapping->tree_lock  (__sync_single_inode)
  *
- *  ->i_mmap_lock
+ *  ->i_mmap_mutex
  *    ->anon_vma.lock    (vma_adjust)
  *
  *  ->anon_vma.lock
@@ -106,7 +107,7 @@
  *
  * (code doesn't rely on that order, so you could switch it around)
  * ->tasklist_lock    (memory_failure, collect_procs_ao)
- * ->i_mmap_lock
+ * ->i_mmap_mutex
  */
 
 /*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
 {
         struct address_space *mapping = page->mapping;
 
+        /*
+         * if we're uptodate, flush out into the cleancache, otherwise
+         * invalidate any existing cleancache entries. We can't leave
+         * stale data around in the cleancache once our page is gone
+         */
+        if (PageUptodate(page) && PageMappedToDisk(page))
+                cleancache_put_page(page);
+        else
+                cleancache_flush_page(mapping, page);
+
         radix_tree_delete(&mapping->page_tree, page->index);
         page->mapping = NULL;
         mapping->nrpages--;
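The two cleancache calls above come from the newly included <linux/cleancache.h>. When CONFIG_CLEANCACHE is not set they would presumably compile down to cheap inline stubs; the sketch below is inferred only from the two call sites in this hunk and is an illustration, not the actual header:

/* Hypothetical no-op fallbacks shaped after the call sites above. */
#ifndef CONFIG_CLEANCACHE
static inline void cleancache_put_page(struct page *page)
{
        /* nothing to preserve when cleancache is compiled out */
}

static inline void cleancache_flush_page(struct address_space *mapping,
                                         struct page *page)
{
        /* nothing to invalidate when cleancache is compiled out */
}
#endif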
@@ -562,6 +573,17 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
+int wait_on_page_bit_killable(struct page *page, int bit_nr)
+{
+        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+
+        if (!test_bit(bit_nr, &page->flags))
+                return 0;
+
+        return __wait_on_bit(page_waitqueue(page), &wait,
+                             sleep_on_page_killable, TASK_KILLABLE);
+}
+
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
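wait_on_page_bit_killable() mirrors wait_on_page_bit() but sleeps in TASK_KILLABLE, so a fatal signal can end the wait. The later __lock_page_or_retry() hunk calls wait_on_page_locked_killable(); a plausible <linux/pagemap.h>-style wrapper, analogous to the existing wait_on_page_locked(), might read as follows (a sketch under that assumption, not part of this diff):

/* Assumed helper: wait for PG_locked to clear, but let a fatal signal
 * interrupt the sleep; returns 0 on success or a nonzero error if the
 * wait was interrupted.
 */
static inline int wait_on_page_locked_killable(struct page *page)
{
        if (PageLocked(page))
                return wait_on_page_bit_killable(page, PG_locked);
        return 0;
}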
@@ -643,15 +665,32 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                          unsigned int flags)
 {
-        if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
-                __lock_page(page);
-                return 1;
-        } else {
-                if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                        up_read(&mm->mmap_sem);
+        if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                /*
+                 * CAUTION! In this case, mmap_sem is not released
+                 * even though return 0.
+                 */
+                if (flags & FAULT_FLAG_RETRY_NOWAIT)
+                        return 0;
+
+                up_read(&mm->mmap_sem);
+                if (flags & FAULT_FLAG_KILLABLE)
+                        wait_on_page_locked_killable(page);
+                else
                         wait_on_page_locked(page);
-                }
                 return 0;
+        } else {
+                if (flags & FAULT_FLAG_KILLABLE) {
+                        int ret;
+
+                        ret = __lock_page_killable(page);
+                        if (ret) {
+                                up_read(&mm->mmap_sem);
+                                return 0;
+                        }
+                } else
+                        __lock_page(page);
+                return 1;
         }
 }
 
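The return convention is what callers key off: 1 means the page is now locked and the fault can proceed; 0 means the caller should retry, and mmap_sem has already been dropped unless FAULT_FLAG_RETRY_NOWAIT was set (hence the CAUTION comment). The existing lock_page_or_retry() call site in filemap_fault() reads roughly as follows (paraphrased for illustration):

        /* Could not take the page lock under the retry rules: report
         * VM_FAULT_RETRY so the fault is re-run.  mmap_sem was released
         * inside __lock_page_or_retry() unless FAULT_FLAG_RETRY_NOWAIT
         * was passed.
         */
        if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
                page_cache_release(page);
                return ret | VM_FAULT_RETRY;
        }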
@@ -1528,15 +1567,17 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
         /* If we don't want any read-ahead, don't bother */
         if (VM_RandomReadHint(vma))
                 return;
+        if (!ra->ra_pages)
+                return;
 
-        if (VM_SequentialReadHint(vma) ||
-                        offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
+        if (VM_SequentialReadHint(vma)) {
                 page_cache_sync_readahead(mapping, ra, file, offset,
                                           ra->ra_pages);
                 return;
         }
 
-        if (ra->mmap_miss < INT_MAX)
+        /* Avoid banging the cache line if not needed */
+        if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
                 ra->mmap_miss++;
 
         /*
@@ -1550,12 +1591,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
          * mmap read-around
          */
         ra_pages = max_sane_readahead(ra->ra_pages);
-        if (ra_pages) {
-                ra->start = max_t(long, 0, offset - ra_pages/2);
-                ra->size = ra_pages;
-                ra->async_size = 0;
-                ra_submit(ra, mapping, file);
-        }
+        ra->start = max_t(long, 0, offset - ra_pages / 2);
+        ra->size = ra_pages;
+        ra->async_size = ra_pages / 4;
+        ra_submit(ra, mapping, file);
 }
 
 /*
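Read-around is now submitted unconditionally (the early !ra->ra_pages check added above covers the disabled case), and a quarter of the window is marked async instead of none. A worked example of the new window, assuming the common default of ra_pages = 32 (128 KiB with 4 KiB pages) and a fault at page offset 100:

/*
 * offset          = 100
 * ra->start       = max_t(long, 0, 100 - 32 / 2) = 84
 * ra->size        = 32            -> pages 84..115 are submitted
 * ra->async_size  = 32 / 4 = 8    -> touching pages 108..115 later
 *                                    triggers the next async readahead
 */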
@@ -1622,6 +1661,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 /* No page in the page cache at all */
                 do_sync_mmap_readahead(vma, ra, file, offset);
                 count_vm_event(PGMAJFAULT);
+                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                 ret = VM_FAULT_MAJOR;
 retry_find:
                 page = find_get_page(mapping, offset);
@@ -1660,7 +1700,6 @@ retry_find:
                 return VM_FAULT_SIGBUS;
         }
 
-        ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
         vmf->page = page;
         return ret | VM_FAULT_LOCKED;
 