aboutsummaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--mm/migrate.c128
1 file changed, 127 insertions, 1 deletion
diff --git a/mm/migrate.c b/mm/migrate.c
index 5a340f4ca212..0a011e421bb4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -15,6 +15,7 @@
15#include <linux/migrate.h> 15#include <linux/migrate.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/swap.h> 17#include <linux/swap.h>
18#include <linux/swapops.h>
18#include <linux/pagemap.h> 19#include <linux/pagemap.h>
19#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
20#include <linux/mm_inline.h> 21#include <linux/mm_inline.h>
@@ -23,7 +24,6 @@
23#include <linux/topology.h> 24#include <linux/topology.h>
24#include <linux/cpu.h> 25#include <linux/cpu.h>
25#include <linux/cpuset.h> 26#include <linux/cpuset.h>
26#include <linux/swapops.h>
27 27
28#include "internal.h" 28#include "internal.h"
29 29
@@ -119,6 +119,132 @@ int putback_lru_pages(struct list_head *l)
119 return count; 119 return count;
120} 120}
121 121
122static inline int is_swap_pte(pte_t pte)
123{
124 return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
125}
126
127/*
128 * Restore a potential migration pte to a working pte entry
129 */
130static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
131 struct page *old, struct page *new)
132{
133 struct mm_struct *mm = vma->vm_mm;
134 swp_entry_t entry;
135 pgd_t *pgd;
136 pud_t *pud;
137 pmd_t *pmd;
138 pte_t *ptep, pte;
139 spinlock_t *ptl;
140
141 pgd = pgd_offset(mm, addr);
142 if (!pgd_present(*pgd))
143 return;
144
145 pud = pud_offset(pgd, addr);
146 if (!pud_present(*pud))
147 return;
148
149 pmd = pmd_offset(pud, addr);
150 if (!pmd_present(*pmd))
151 return;
152
153 ptep = pte_offset_map(pmd, addr);
154
155 if (!is_swap_pte(*ptep)) {
156 pte_unmap(ptep);
157 return;
158 }
159
160 ptl = pte_lockptr(mm, pmd);
161 spin_lock(ptl);
162 pte = *ptep;
163 if (!is_swap_pte(pte))
164 goto out;
165
166 entry = pte_to_swp_entry(pte);
167
168 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
169 goto out;
170
171 inc_mm_counter(mm, anon_rss);
172 get_page(new);
173 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
174 if (is_write_migration_entry(entry))
175 pte = pte_mkwrite(pte);
176 set_pte_at(mm, addr, ptep, pte);
177 page_add_anon_rmap(new, vma, addr);
178out:
179 pte_unmap_unlock(ptep, ptl);
180}
181
182/*
183 * Get rid of all migration entries and replace them by
184 * references to the indicated page.
185 *
186 * Must hold mmap_sem lock on at least one of the vmas containing
187 * the page so that the anon_vma cannot vanish.
188 */
189static void remove_migration_ptes(struct page *old, struct page *new)
190{
191 struct anon_vma *anon_vma;
192 struct vm_area_struct *vma;
193 unsigned long mapping;
194
195 mapping = (unsigned long)new->mapping;
196
197 if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
198 return;
199
200 /*
201 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
202 */
203 anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
204 spin_lock(&anon_vma->lock);
205
206 list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
207 remove_migration_pte(vma, page_address_in_vma(new, vma),
208 old, new);
209
210 spin_unlock(&anon_vma->lock);
211}
212
213/*
214 * Something used the pte of a page under migration. We need to
215 * get to the page and wait until migration is finished.
216 * When we return from this function the fault will be retried.
217 *
218 * This function is called from do_swap_page().
219 */
220void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
221 unsigned long address)
222{
223 pte_t *ptep, pte;
224 spinlock_t *ptl;
225 swp_entry_t entry;
226 struct page *page;
227
228 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
229 pte = *ptep;
230 if (!is_swap_pte(pte))
231 goto out;
232
233 entry = pte_to_swp_entry(pte);
234 if (!is_migration_entry(entry))
235 goto out;
236
237 page = migration_entry_to_page(entry);
238
239 get_page(page);
240 pte_unmap_unlock(ptep, ptl);
241 wait_on_page_locked(page);
242 put_page(page);
243 return;
244out:
245 pte_unmap_unlock(ptep, ptl);
246}
247
122/* 248/*
123 * swapout a single page 249 * swapout a single page
124 * page is locked upon entry, unlocked on exit 250 * page is locked upon entry, unlocked on exit