author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-07-26 18:25:23 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>        2016-07-26 19:19:19 -0400
commit    7267ec008b5cd8b3579e188b1ff238815643e372 (patch)
tree      06e45eb3b7b951799e452403dfaf77fefb726b54 /mm/filemap.c
parent    bae473a423f65e480db83c85b5e92254f6dfcb28 (diff)
mm: postpone page table allocation until we have page to map
The idea (and most of the code) is, once again, borrowed from Hugh's patchset on huge tmpfs[1]. Instead of allocating the pte page table upfront, we postpone this until we have a page to map in hand. This approach opens the possibility of mapping the page as huge, if the filesystem supports it.

Compared to Hugh's patch, I've pushed the page table allocation a bit further: into do_set_pte(). This way we can postpone the allocation even in the faultaround case, without moving do_fault_around() after __do_fault(). do_set_pte() got renamed to alloc_set_pte(), as it can allocate a page table if required.

[1] http://lkml.kernel.org/r/alpine.LSU.2.11.1502202015090.14414@eggly.anvils

Link: http://lkml.kernel.org/r/1466021202-61880-10-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
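For context, a minimal sketch of the lazy-allocation shape the message describes. This is a simplification, not the patch itself: the real alloc_set_pte() also handles PMD-sized (huge) pages, the copy-on-write/anonymous case, memcg charging and mm counter accounting, all elided here. The alloc_set_pte() signature and the pte_alloc_one_map() helper come from this patch; the rest of the body is an abridged illustration.

/*
 * Simplified sketch: fe->pte == NULL means no page table has been
 * mapped for this fault yet; it is only allocated once there is an
 * actual page to map.
 */
int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
		struct page *page)
{
	struct vm_area_struct *vma = fe->vma;
	pte_t entry;

	if (!fe->pte) {
		/* Postponed allocation: map a pte page table now. */
		int ret = pte_alloc_one_map(fe);
		if (ret)
			return ret;
	}

	/* Re-check under the page table lock. */
	if (unlikely(!pte_none(*fe->pte)))
		return VM_FAULT_NOPAGE;

	flush_icache_page(vma, page);
	entry = mk_pte(page, vma->vm_page_prot);
	if (fe->flags & FAULT_FLAG_WRITE)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	page_add_file_rmap(page, false);
	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
	update_mmu_cache(vma, fe->address, fe->pte);

	return 0;
}

The caller-side consequence is visible in the filemap_map_pages() hunks below: fe->address is always advanced across the faultaround window, but fe->pte is only advanced once a page table has actually been mapped.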
Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 54d5318f8d3f..1efd2994dccf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2144,11 +2144,6 @@ void filemap_map_pages(struct fault_env *fe,
 			start_pgoff) {
 		if (iter.index > end_pgoff)
 			break;
-		fe->pte += iter.index - last_pgoff;
-		fe->address += (iter.index - last_pgoff) << PAGE_SHIFT;
-		last_pgoff = iter.index;
-		if (!pte_none(*fe->pte))
-			goto next;
 repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
@@ -2186,7 +2181,13 @@ repeat:
 
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
-		do_set_pte(fe, page);
+
+		fe->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+		if (fe->pte)
+			fe->pte += iter.index - last_pgoff;
+		last_pgoff = iter.index;
+		if (alloc_set_pte(fe, NULL, page))
+			goto unlock;
 		unlock_page(page);
 		goto next;
 unlock:
@@ -2194,6 +2195,9 @@ unlock:
 skip:
 		put_page(page);
 next:
+		/* Huge page is mapped? No need to proceed. */
+		if (pmd_trans_huge(*fe->pmd))
+			break;
 		if (iter.index == end_pgoff)
 			break;
 	}