Diffstat (limited to 'mm/fremap.c')
-rw-r--r--	mm/fremap.c	86
1 file changed, 40 insertions(+), 46 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index ab23a0673c35..d862be3bc3e3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -20,33 +20,32 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
+	struct page *page = NULL;
 
-	if (pte_none(pte))
-		return;
 	if (pte_present(pte)) {
 		unsigned long pfn = pte_pfn(pte);
-
 		flush_cache_page(vma, addr, pfn);
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			if (!PageReserved(page)) {
-				if (pte_dirty(pte))
-					set_page_dirty(page);
-				page_remove_rmap(page);
-				page_cache_release(page);
-				dec_mm_counter(mm, rss);
-			}
+		if (unlikely(!pfn_valid(pfn))) {
+			print_bad_pte(vma, pte, addr);
+			goto out;
 		}
+		page = pfn_to_page(pfn);
+		if (pte_dirty(pte))
+			set_page_dirty(page);
+		page_remove_rmap(page);
+		page_cache_release(page);
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(mm, addr, ptep);
 	}
+out:
+	return !!page;
 }
 
 /*
@@ -64,21 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
+
+	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	/*
 	 * This page may have been truncated. Tell the
@@ -88,29 +86,27 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	inode = vma->vm_file->f_mapping->host;
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
-		goto err_unlock;
+		goto unlock;
 	err = -ENOMEM;
 	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
+		goto unlock;
 
-	zap_pte(mm, vma, addr, pte);
+	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
+		inc_mm_counter(mm, file_rss);
 
-	inc_mm_counter(mm,rss);
 	flush_icache_page(vma, page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
 	return err;
 }
 EXPORT_SYMBOL(install_page);
 
-
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
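
Both install_page() above and install_file_pte() below are converted from holding mm->page_table_lock across the whole walk to the pte_alloc_map_lock()/pte_unmap_unlock() pair, which allocates the page table if necessary, maps it, and takes the page table lock, returning the lock through *ptl. A condensed sketch of the idiom, with error paths elided:

	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);	/* alloc + map + lock */
	if (pte) {
		/* ... inspect and modify *pte under the lock ... */
		pte_unmap_unlock(pte, ptl);		/* unlock + unmap */
	}
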
@@ -124,37 +120,35 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
+
+	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
-	zap_pte(mm, vma, addr, pte);
+	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
+		update_hiwater_rss(mm);
+		dec_mm_counter(mm, file_rss);
+	}
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
-
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
+	err = 0;
+out:
 	return err;
 }
 
-
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  * file within an existing vma.
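
For reference, the update_hiwater_rss() call added in install_file_pte() records the RSS high-water mark before file_rss is decremented, so the reported peak is not understated. A sketch of the definition assumed here, along the lines of the 2.6.15-era include/linux/mm.h (reproduced from memory, so treat as approximate):

	#define update_hiwater_rss(mm)	do {			\
		unsigned long _rss = get_mm_rss(mm);		\
		if ((mm)->hiwater_rss < _rss)			\
			(mm)->hiwater_rss = _rss;		\
	} while (0)
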