aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 include/linux/rmap.h |  1 +
 include/linux/swap.h |  1 +
 mm/rmap.c            | 29 +++++++++++++++++++++++++++++
 mm/swapfile.c        |  9 +++++++++
 mm/vmscan.c          |  9 +++++++++
 5 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0f1ea2d6ed86..d6b9bcd1384c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -92,6 +92,7 @@ static inline void page_dup_rmap(struct page *page)
  */
 int page_referenced(struct page *, int is_locked);
 int try_to_unmap(struct page *, int ignore_refs);
+void remove_from_swap(struct page *page);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d359fc022433..229b6d04b4b6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -248,6 +248,7 @@ extern int remove_exclusive_swap_page(struct page *);
 struct backing_dev_info;
 
 extern spinlock_t swap_lock;
+extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
diff --git a/mm/rmap.c b/mm/rmap.c
index 13fad5fcdf79..f4b91d7aa5cf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -206,6 +206,35 @@ out:
 	return anon_vma;
 }
 
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap replacing the swap pte's
+ * through real pte's pointing to valid pages and then releasing
+ * the page from the swap cache.
+ *
+ * Must hold page lock on page.
+ */
+void remove_from_swap(struct page *page)
+{
+	struct anon_vma *anon_vma;
+	struct vm_area_struct *vma;
+
+	if (!PageAnon(page) || !PageSwapCache(page))
+		return;
+
+	anon_vma = page_lock_anon_vma(page);
+	if (!anon_vma)
+		return;
+
+	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+		remove_vma_swap(vma, page);
+
+	spin_unlock(&anon_vma->lock);
+
+	delete_from_swap_cache(page);
+}
+#endif
+
 /*
  * At what user virtual address is page expected in vma?
  */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9678182e0eef..1f9cf0d073b8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -554,6 +554,15 @@ static int unuse_mm(struct mm_struct *mm,
 	return 0;
 }
 
+#ifdef CONFIG_MIGRATION
+int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
+{
+	swp_entry_t entry = { .val = page_private(page) };
+
+	return unuse_vma(vma, entry, page);
+}
+#endif
+
 /*
  * Scan swap_map from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f326ce2b690..5e98b86feb74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -804,6 +804,15 @@ int migrate_page(struct page *newpage, struct page *page)
 
 	migrate_page_copy(newpage, page);
 
+	/*
+	 * Remove auxiliary swap entries and replace
+	 * them with real ptes.
+	 *
+	 * Note that a real pte entry will allow processes that are not
+	 * waiting on the page lock to use the new page via the page tables
+	 * before the new page is unlocked.
+	 */
+	remove_from_swap(newpage);
 	return 0;
 }
 