 include/linux/swap.h |  2 --
 mm/memory.c          |  6 ------
 mm/rmap.c            |  7 ++++++-
 mm/swap.c            | 19 -------------------
 4 files changed, 6 insertions(+), 28 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a3af95b2cb6d..48f309dc5a0c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -174,8 +174,6 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_cache_add_active_or_unevictable(struct page *,
-					struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
diff --git a/mm/memory.c b/mm/memory.c
index b5af358b8b22..a138c50dc39a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1949,10 +1949,7 @@ gotten:
 	 */
 	ptep_clear_flush_notify(vma, address, page_table);
 	SetPageSwapBacked(new_page);
-	lru_cache_add_active_or_unevictable(new_page, vma);
 	page_add_new_anon_rmap(new_page, vma, address);
-
-//TODO:  is this safe?  do_anonymous_page() does it this way.
 	set_pte_at(mm, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 	if (old_page) {
@@ -2448,7 +2445,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto release;
 	inc_mm_counter(mm, anon_rss);
 	SetPageSwapBacked(page);
-	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2597,7 +2593,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (anon) {
 		inc_mm_counter(mm, anon_rss);
 		SetPageSwapBacked(page);
-		lru_cache_add_active_or_unevictable(page, vma);
 		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
@@ -2607,7 +2602,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			get_page(dirty_page);
 		}
 	}
-//TODO:  is this safe?  do_anonymous_page() does it this way.
 	set_pte_at(mm, address, page_table, entry);
 
 	/* no need to invalidate: a not-present page won't be cached */
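
[Aside: the mm/memory.c hunks all shrink the fault paths to the same shape. A condensed sketch of the anonymous-fault sequence after this patch, with locals, locking and error handling elided; every identifier is taken from the hunks above:

	inc_mm_counter(mm, anon_rss);
	SetPageSwapBacked(page);
	page_add_new_anon_rmap(page, vma, address);	/* now also places page on LRU */
	set_pte_at(mm, address, page_table, entry);

The explicit lru_cache_add_active_or_unevictable() call and the "is this safe?" TODOs go away because LRU placement moves into page_add_new_anon_rmap() itself, in the mm/rmap.c hunk below.]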
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
+#include <linux/mm_inline.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -671,9 +672,13 @@ void page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
 }
 
 /**
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -246,25 +246,6 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
-/**
- * lru_cache_add_active_or_unevictable
- * @page:  the page to be added to LRU
- * @vma:   vma in which page is mapped for determining reclaimability
- *
- * place @page on active or unevictable LRU list, depending on
- * page_evictable().  Note that if the page is not evictable,
- * it goes directly back onto it's zone's unevictable list.  It does
- * NOT use a per cpu pagevec.
- */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	if (page_evictable(page, vma))
-		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
-	else
-		add_page_to_unevictable_list(page);
-}
-
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
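
[For reference, here is page_add_new_anon_rmap() as it reads with the patch applied. This is reassembled from the mm/rmap.c hunk above rather than copied from a tree, so treat it as a sketch; the comments are added here, guided by the kernel-doc text deleted from mm/swap.c:

void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		/* evictable: queue onto the active anon/file LRU */
		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
	else
		/* not evictable (e.g. mlocked vma): straight onto the zone's
		 * unevictable list, without using a per-cpu pagevec */
		add_page_to_unevictable_list(page);
}

Every new anonymous page now gets its LRU placement from the rmap call itself, which is why each caller in mm/memory.c loses one line and lru_cache_add_active_or_unevictable() can be deleted from mm/swap.c along with its declaration in swap.h.]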