author     Nick Piggin <npiggin@suse.de>                          2007-07-19 04:47:03 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 13:04:41 -0400
commit     d0217ac04ca6591841e5665f518e38064f4e65bd (patch)
tree       d3309094bb734d34773f97d642593e298a5cfcfc /mm/fremap.c
parent     ed2f2f9b3ff8debdf512f7687b232c3c1d7d60d7 (diff)
mm: fault feedback #1
Change ->fault prototype. We now return an int, which contains VM_FAULT_xxx code in the low byte, and FAULT_RET_xxx code in the next byte. FAULT_RET_ code tells the VM whether a page was found, whether it has been locked, and potentially other things. This is not quite the way he wanted it yet, but that's changed in the next patch (which requires changes to arch code).

This means we no longer set VM_CAN_INVALIDATE in the vma in order to say that a page is locked which requires filemap_nopage to go away (because we can no longer remain backward compatible without that flag), but we were going to do that anyway.

struct fault_data is renamed to struct vm_fault as Linus asked. address is now a void __user * that we should firmly encourage drivers not to use without really good reason.

The page is now returned via a page pointer in the vm_fault struct.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
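As a rough sketch of the interface the message describes (the struct layout is reconstructed from the message, and the two helper macros are illustrative assumptions, not copied from this tree's headers):

	/* ->fault handlers now take a struct vm_fault and return an int. */
	struct vm_fault {
		unsigned int flags;		/* FAULT_FLAG_xxx */
		pgoff_t pgoff;			/* logical offset into the file */
		void __user *virtual_address;	/* drivers should avoid relying on this */
		struct page *page;		/* the handler returns the found page here */
	};

	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* Return-value encoding per the message above: VM_FAULT_xxx in the
	 * low byte, FAULT_RET_xxx (page found, page locked, ...) in the
	 * next byte. These accessor macros are hypothetical. */
	#define FAULT_STATUS(ret)	((ret) & 0xff)
	#define FAULT_RET_FLAGS(ret)	((ret) & ~0xff)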
Diffstat (limited to 'mm/fremap.c')
-rw-r--r--  mm/fremap.c | 85
1 file changed, 16 insertions(+), 69 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 01e51f01b84e..5f50d736a037 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -20,13 +20,14 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
-	struct page *page = NULL;
 
 	if (pte_present(pte)) {
+		struct page *page;
+
 		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
 		page = vm_normal_page(vma, addr, pte);
@@ -35,68 +36,21 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 				set_page_dirty(page);
 			page_remove_rmap(page, vma);
 			page_cache_release(page);
+			update_hiwater_rss(mm);
+			dec_mm_counter(mm, file_rss);
 		}
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear_not_present_full(mm, addr, ptep, 0);
 	}
-	return !!page;
 }
 
 /*
- * Install a file page to a given virtual memory address, release any
- * previously existing mapping.
- */
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
-	unsigned long addr, struct page *page, pgprot_t prot)
-{
-	struct inode *inode;
-	pgoff_t size;
-	int err = -ENOMEM;
-	pte_t *pte;
-	pte_t pte_val;
-	spinlock_t *ptl;
-
-	pte = get_locked_pte(mm, addr, &ptl);
-	if (!pte)
-		goto out;
-
-	/*
-	 * This page may have been truncated. Tell the
-	 * caller about it.
-	 */
-	err = -EINVAL;
-	inode = vma->vm_file->f_mapping->host;
-	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (!page->mapping || page->index >= size)
-		goto unlock;
-	err = -ENOMEM;
-	if (page_mapcount(page) > INT_MAX/2)
-		goto unlock;
-
-	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
-		inc_mm_counter(mm, file_rss);
-
-	flush_icache_page(vma, page);
-	pte_val = mk_pte(page, prot);
-	set_pte_at(mm, addr, pte, pte_val);
-	page_add_file_rmap(page);
-	update_mmu_cache(vma, addr, pte_val);
-	lazy_mmu_prot_update(pte_val);
-	err = 0;
-unlock:
-	pte_unmap_unlock(pte, ptl);
-out:
-	return err;
-}
-EXPORT_SYMBOL(install_page);
-
-/*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
  */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
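Pieced together from the two hunks above, the post-patch zap_pte() reads as follows (assembled here for readability; the if (page) / pte_dirty() lines fall between the hunks and are assumed from the unchanged surrounding code):

	static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
	{
		pte_t pte = *ptep;

		if (pte_present(pte)) {
			struct page *page;

			flush_cache_page(vma, addr, pte_pfn(pte));
			pte = ptep_clear_flush(vma, addr, ptep);
			page = vm_normal_page(vma, addr, pte);
			if (page) {		/* assumed: outside the hunk context */
				if (pte_dirty(pte))
					set_page_dirty(page);
				page_remove_rmap(page, vma);
				page_cache_release(page);
				/* rss accounting now lives here, not in callers */
				update_hiwater_rss(mm);
				dec_mm_counter(mm, file_rss);
			}
		} else {
			if (!pte_file(pte))
				free_swap_and_cache(pte_to_swp_entry(pte));
			pte_clear_not_present_full(mm, addr, ptep, 0);
		}
	}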
@@ -107,10 +61,8 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		goto out;
 
-	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
-		update_hiwater_rss(mm);
-		dec_mm_counter(mm, file_rss);
-	}
+	if (!pte_none(*pte))
+		zap_pte(mm, vma, addr, pte);
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	/*
@@ -208,8 +160,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
 		goto out;
 
-	if ((!vma->vm_ops || !vma->vm_ops->populate) &&
-			!(vma->vm_flags & VM_CAN_NONLINEAR))
+	if (!vma->vm_flags & VM_CAN_NONLINEAR)
 		goto out;
 
 	if (end <= start || start < vma->vm_start || end > vma->vm_end)
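An aside on the hunk above: under C precedence, ! binds tighter than &, so the new test as shown evaluates (!vma->vm_flags) & VM_CAN_NONLINEAR rather than testing the flag bit. For comparison, the parenthesized form of the check the surrounding code presumably intends (a reading of the shown lines, not a claim about later kernel history):

	/* As committed above: tests bit 0 of !vma->vm_flags, not the flag. */
	if (!vma->vm_flags & VM_CAN_NONLINEAR)
		goto out;

	/* Parenthesized form that tests VM_CAN_NONLINEAR itself: */
	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;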
@@ -239,18 +190,14 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 		spin_unlock(&mapping->i_mmap_lock);
 	}
 
-	if (vma->vm_flags & VM_CAN_NONLINEAR) {
-		err = populate_range(mm, vma, start, size, pgoff);
-		if (!err && !(flags & MAP_NONBLOCK)) {
-			if (unlikely(has_write_lock)) {
-				downgrade_write(&mm->mmap_sem);
-				has_write_lock = 0;
-			}
-			make_pages_present(start, start+size);
-		}
-	} else
-		err = vma->vm_ops->populate(vma, start, size, vma->vm_page_prot,
-						pgoff, flags & MAP_NONBLOCK);
+	err = populate_range(mm, vma, start, size, pgoff);
+	if (!err && !(flags & MAP_NONBLOCK)) {
+		if (unlikely(has_write_lock)) {
+			downgrade_write(&mm->mmap_sem);
+			has_write_lock = 0;
+		}
+		make_pages_present(start, start+size);
+	}
 
 	/*
 	 * We can't clear VM_NONLINEAR because we'd have to do
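For context, sys_remap_file_pages() above backs the remap_file_pages(2) syscall, and after this patch every nonlinear request goes through populate_range(). A minimal userspace sketch of that path (illustrative only; the file name is hypothetical and error handling is trimmed):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		int fd = open("data.bin", O_RDONLY);	/* hypothetical input file */
		if (fd < 0)
			return 1;

		/* Map two file pages linearly; MAP_SHARED is required for remapping. */
		char *p = mmap(NULL, 2 * pagesz, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* Rewire the first virtual page to file page 1 (pgoff is in pages):
		 * this exercises the VM_NONLINEAR path shown above. */
		if (remap_file_pages(p, pagesz, 0, 1, 0) != 0)
			perror("remap_file_pages");

		printf("%.16s\n", p);	/* bytes now come from file offset pagesz */
		munmap(p, 2 * pagesz);
		close(fd);
		return 0;
	}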