Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index dbc2ca2057a5..a0e92a263d12 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,7 +36,6 @@
  * mapping->tree_lock (widely used, in set_page_dirty,
  *   in arch-dependent flush_dcache_mmap_lock,
  *   within inode_lock in __sync_single_inode)
- *   zone->lock (within radix tree node alloc)
  */
 
 #include <linux/mm.h>
@@ -49,6 +48,7 @@
 #include <linux/rcupdate.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 
@@ -284,7 +284,10 @@ static int page_referenced_one(struct page *page,
         if (!pte)
                 goto out;
 
-        if (ptep_clear_flush_young(vma, address, pte))
+        if (vma->vm_flags & VM_LOCKED) {
+                referenced++;
+                *mapcount = 1;  /* break early from loop */
+        } else if (ptep_clear_flush_young(vma, address, pte))
                 referenced++;
 
         /* Pretend the page is referenced if the task has the
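
The early break added here leans on a contract that sits just outside the hunk: page_referenced_one() decrements the caller-supplied *mapcount before returning, and both VMA walks below stop as soon as it reaches zero. Forcing *mapcount to 1 for a VM_LOCKED mapping therefore ends the walk on the very next decrement, and the page is reported referenced so reclaim leaves it alone. A sketch of that function tail, recalled from this era of mm/rmap.c rather than quoted from the diff:

        /*
         * Sketch, not part of this diff: the tail of page_referenced_one().
         * Every call consumes one entry from the mapping budget, so the
         * *mapcount = 1 above guarantees the caller's next test sees zero.
         */
        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
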
@@ -299,7 +302,8 @@ out:
         return referenced;
 }
 
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+                                struct mem_cgroup *mem_cont)
 {
         unsigned int mapcount;
         struct anon_vma *anon_vma;
@@ -312,6 +316,13 @@ static int page_referenced_anon(struct page *page)
 
         mapcount = page_mapcount(page);
         list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+                /*
+                 * If we are reclaiming on behalf of a cgroup, skip
+                 * counting on behalf of references from different
+                 * cgroups
+                 */
+                if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                        continue;
                 referenced += page_referenced_one(page, vma, &mapcount);
                 if (!mapcount)
                         break;
@@ -332,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+                                struct mem_cgroup *mem_cont)
 {
         unsigned int mapcount;
         struct address_space *mapping = page->mapping;
@@ -365,6 +377,13 @@ static int page_referenced_file(struct page *page)
         mapcount = page_mapcount(page);
 
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+                /*
+                 * If we are reclaiming on behalf of a cgroup, skip
+                 * counting on behalf of references from different
+                 * cgroups
+                 */
+                if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                        continue;
                 if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                           == (VM_LOCKED|VM_MAYSHARE)) {
                         referenced++;
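
This guard is now applied identically in both walkers: when reclaim runs on behalf of a memory cgroup (mem_cont != NULL), VMAs whose mm belongs to a different group are skipped, so one group's activity cannot keep another group's pages looking young; a NULL mem_cont preserves the old global behaviour. mm_cgroup() is the patchset's mm-to-cgroup lookup; assuming the mm carries a mem_cgroup pointer as in the early memory-controller patches, the helper amounts to something like:

        /*
         * Hedged sketch of the assumed helper (the real definition lives
         * in <linux/memcontrol.h>, not in this diff): resolve an mm to
         * the memory cgroup it is charged against.
         */
        static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
        {
                return rcu_dereference(mm->mem_cgroup);
        }
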
@@ -387,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+                        struct mem_cgroup *mem_cont)
 {
         int referenced = 0;
 
@@ -399,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
 
         if (page_mapped(page) && page->mapping) {
                 if (PageAnon(page))
-                        referenced += page_referenced_anon(page);
+                        referenced += page_referenced_anon(page, mem_cont);
                 else if (is_locked)
-                        referenced += page_referenced_file(page);
+                        referenced += page_referenced_file(page, mem_cont);
                 else if (TestSetPageLocked(page))
                         referenced++;
                 else {
                         if (page->mapping)
-                                referenced += page_referenced_file(page);
+                                referenced +=
+                                        page_referenced_file(page, mem_cont);
                         unlock_page(page);
                 }
         }
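
The function's locking behaviour and return value are unchanged; callers only gain the third argument. A hedged usage sketch (the real call sites are in mm/vmscan.c, not in this diff; sc->mem_cgroup stands in for however the reclaim scan control carries the target group):

        int referenced;

        /* Global reclaim: NULL counts references from every mapping. */
        referenced = page_referenced(page, 1, NULL);

        /* Cgroup reclaim: count only references from the target group. */
        referenced = page_referenced(page, 1, sc->mem_cgroup);
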
@@ -552,8 +573,14 @@ void page_add_anon_rmap(struct page *page,
         VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
         if (atomic_inc_and_test(&page->_mapcount))
                 __page_set_anon_rmap(page, vma, address);
-        else
+        else {
                 __page_check_anon_rmap(page, vma, address);
+                /*
+                 * We unconditionally charged during prepare, we uncharge here
+                 * This takes care of balancing the reference counts
+                 */
+                mem_cgroup_uncharge_page(page);
+        }
 }
 
 /*
@@ -584,6 +611,12 @@ void page_add_file_rmap(struct page *page)
 {
         if (atomic_inc_and_test(&page->_mapcount))
                 __inc_zone_page_state(page, NR_FILE_MAPPED);
+        else
+                /*
+                 * We unconditionally charged during prepare, we uncharge here
+                 * This takes care of balancing the reference counts
+                 */
+                mem_cgroup_uncharge_page(page);
 }
 
 #ifdef CONFIG_DEBUG_VM
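
page_add_anon_rmap() and page_add_file_rmap() now enforce the same invariant: the fault path charges the page to its cgroup before the mapping is taken (the "prepare" the comments refer to), so when the page turns out to be mapped already, one charge too many exists and the duplicate is returned here. A simplified sketch of the ordering being balanced, with the charge call's name and signature assumed from the companion memcontrol patches:

        /*
         * Hedged fault-path sketch, not part of this diff: charge first,
         * then map.  If this was not the page's first mapping, the rmap
         * functions above drop the charge we just took, leaving exactly
         * one charge per mapped page.
         */
        if (mem_cgroup_charge(page, mm, GFP_KERNEL))
                goto oom;       /* over limit and cgroup reclaim failed */
        /* ... install the pte ... */
        page_add_anon_rmap(page, vma, address);
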
@@ -644,6 +677,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
                         page_clear_dirty(page);
                         set_page_dirty(page);
                 }
+                mem_cgroup_uncharge_page(page);
+
                 __dec_zone_page_state(page,
                         PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
         }
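
Taken together, the charge now tracks the mapcount exactly: the first mapping keeps the charge taken at fault time, every additional mapping immediately hands back its duplicate, and the uncharge added above runs only when the last mapping disappears, in the same branch that already performs the final NR_ANON_PAGES/NR_FILE_MAPPED accounting.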