-rw-r--r--	include/linux/swap.h	 2
-rw-r--r--	mm/memory.c	18
-rw-r--r--	mm/swap.c	21
3 files changed, 33 insertions, 8 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7edb4cbc29f9..07eda69412fb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -173,6 +173,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_cache_add_active_or_unevictable(struct page *,
+						struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
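The declaration above pairs with the helper implemented in mm/swap.c at the end of this patch. As a minimal usage sketch, grounded in the memory.c hunks below rather than any new API, a fault path that has just instantiated an anonymous page hands it over like this:

	/* illustrative caller, mirroring the memory.c hunks in this patch */
	SetPageSwapBacked(page);			/* anon, swap-backed page */
	lru_cache_add_active_or_unevictable(page, vma);	/* active or unevictable LRU */
	page_add_new_anon_rmap(page, vma, address);	/* then wire up the rmap */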
diff --git a/mm/memory.c b/mm/memory.c
index 9fef7272fb9e..450127f4c582 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1922,12 +1922,13 @@ gotten:
 	 * thread doing COW.
 	 */
 	ptep_clear_flush_notify(vma, address, page_table);
-	set_pte_at(mm, address, page_table, entry);
-	update_mmu_cache(vma, address, entry);
 	SetPageSwapBacked(new_page);
-	lru_cache_add_active_anon(new_page);
+	lru_cache_add_active_or_unevictable(new_page, vma);
 	page_add_new_anon_rmap(new_page, vma, address);
 
+	//TODO: is this safe? do_anonymous_page() does it this way.
+	set_pte_at(mm, address, page_table, entry);
+	update_mmu_cache(vma, address, entry);
 	if (old_page) {
 		/*
 		 * Only after switching the pte to the new page may
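The reordering above makes the COW path match do_anonymous_page(): LRU and rmap state are settled before set_pte_at() makes the page reachable through the page tables, which is one plausible answer to the in-code TODO (this rationale is a reading of the patch, not stated in it). A hypothetical helper, not part of this patch, capturing the shared pattern:

	/*
	 * Hypothetical helper (not in this patch): the order both
	 * do_wp_page() and do_anonymous_page() now follow. LRU and rmap
	 * state are in place before the pte is published.
	 */
	static void map_new_anon_page(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			pte_t *page_table, pte_t entry, struct page *page)
	{
		SetPageSwapBacked(page);
		lru_cache_add_active_or_unevictable(page, vma);
		page_add_new_anon_rmap(page, vma, address);
		set_pte_at(mm, address, page_table, entry);	/* publish last */
		update_mmu_cache(vma, address, entry);
	}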
@@ -2420,7 +2421,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto release;
 	inc_mm_counter(mm, anon_rss);
 	SetPageSwapBacked(page);
-	lru_cache_add_active_anon(page);
+	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2564,12 +2565,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (flags & FAULT_FLAG_WRITE)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	set_pte_at(mm, address, page_table, entry);
 	if (anon) {
 		inc_mm_counter(mm, anon_rss);
 		SetPageSwapBacked(page);
-		lru_cache_add_active_anon(page);
+		lru_cache_add_active_or_unevictable(page, vma);
 		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(page);
@@ -2578,6 +2578,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			get_page(dirty_page);
 		}
 	}
+	//TODO: is this safe? do_anonymous_page() does it this way.
+	set_pte_at(mm, address, page_table, entry);
 
 	/* no need to invalidate: a not-present page won't be cached */
 	update_mmu_cache(vma, address, entry);
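With this pair of hunks the same publish-last ordering reaches __do_fault(), and because the moved set_pte_at() sits below both branches, file-backed pages also get their rmap in place before the pte becomes visible. The tail of the function after the patch, condensed to the lines relevant here (an elided sketch, not a verbatim quote of the result):

	entry = mk_pte(page, vma->vm_page_prot);
	if (flags & FAULT_FLAG_WRITE)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	if (anon) {
		SetPageSwapBacked(page);
		lru_cache_add_active_or_unevictable(page, vma);
		page_add_new_anon_rmap(page, vma, address);	/* anon rmap */
	} else {
		page_add_file_rmap(page);			/* file rmap */
	}
	set_pte_at(mm, address, page_table, entry);		/* pte published last */
	update_mmu_cache(vma, address, entry);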
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,8 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 
+#include "internal.h"
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
@@ -244,6 +246,25 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page:  the page to be added to LRU
+ * @vma:   vma in which page is mapped for determining reclaimability
+ *
+ * place @page on active or unevictable LRU list, depending on
+ * page_evictable().  Note that if the page is not evictable,
+ * it goes directly back onto its zone's unevictable list.  It does
+ * NOT use a per cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+					struct vm_area_struct *vma)
+{
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
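The helper's routing decision rests entirely on page_evictable(), which is outside this excerpt. A minimal sketch of the contract the helper assumes, with a deliberately renamed function and checks that are illustrative of the unevictable-LRU design rather than the exact implementation in this tree:

	/*
	 * Illustrative only: the real page_evictable() lives elsewhere in
	 * the unevictable LRU series. These checks sketch its contract.
	 */
	static int page_evictable_sketch(struct page *page,
					 struct vm_area_struct *vma)
	{
		struct address_space *mapping = page_mapping(page);

		if (vma && (vma->vm_flags & VM_LOCKED))
			return 0;	/* mlock()ed vma, keep off normal LRUs */
		if (mapping && mapping_unevictable(mapping))
			return 0;	/* e.g. SHM_LOCK'd shmem mapping */
		return 1;		/* evictable, normal LRU handling */
	}

Under that contract, a newly faulted page in a VM_LOCKED vma goes straight to its zone's unevictable list via add_page_to_unevictable_list(), bypassing the per-cpu pagevecs exactly as the kernel-doc comment above says.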