Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  47
1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 57ad276900c9..a0e92a263d12 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -48,6 +48,7 @@
 #include <linux/rcupdate.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 
@@ -301,7 +302,8 @@ out:
         return referenced;
 }
 
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+                                struct mem_cgroup *mem_cont)
 {
         unsigned int mapcount;
         struct anon_vma *anon_vma;
@@ -314,6 +316,13 @@ static int page_referenced_anon(struct page *page)
 
         mapcount = page_mapcount(page);
         list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+                /*
+                 * If we are reclaiming on behalf of a cgroup, skip
+                 * counting on behalf of references from different
+                 * cgroups
+                 */
+                if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                        continue;
                 referenced += page_referenced_one(page, vma, &mapcount);
                 if (!mapcount)
                         break;
@@ -334,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+                                struct mem_cgroup *mem_cont)
 {
         unsigned int mapcount;
         struct address_space *mapping = page->mapping;
@@ -367,6 +377,13 @@ static int page_referenced_file(struct page *page)
         mapcount = page_mapcount(page);
 
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+                /*
+                 * If we are reclaiming on behalf of a cgroup, skip
+                 * counting on behalf of references from different
+                 * cgroups
+                 */
+                if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                        continue;
                 if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                   == (VM_LOCKED|VM_MAYSHARE)) {
                         referenced++;
@@ -389,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+                        struct mem_cgroup *mem_cont)
 {
         int referenced = 0;
 
@@ -401,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
 
         if (page_mapped(page) && page->mapping) {
                 if (PageAnon(page))
-                        referenced += page_referenced_anon(page);
+                        referenced += page_referenced_anon(page, mem_cont);
                 else if (is_locked)
-                        referenced += page_referenced_file(page);
+                        referenced += page_referenced_file(page, mem_cont);
                 else if (TestSetPageLocked(page))
                         referenced++;
                 else {
                         if (page->mapping)
-                                referenced += page_referenced_file(page);
+                                referenced +=
+                                        page_referenced_file(page, mem_cont);
                         unlock_page(page);
                 }
         }
@@ -554,8 +573,14 @@ void page_add_anon_rmap(struct page *page,
         VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
         if (atomic_inc_and_test(&page->_mapcount))
                 __page_set_anon_rmap(page, vma, address);
-        else
+        else {
                 __page_check_anon_rmap(page, vma, address);
+                /*
+                 * We unconditionally charged during prepare, we uncharge here
+                 * This takes care of balancing the reference counts
+                 */
+                mem_cgroup_uncharge_page(page);
+        }
 }
 
 /*
@@ -586,6 +611,12 @@ void page_add_file_rmap(struct page *page)
 {
         if (atomic_inc_and_test(&page->_mapcount))
                 __inc_zone_page_state(page, NR_FILE_MAPPED);
+        else
+                /*
+                 * We unconditionally charged during prepare, we uncharge here
+                 * This takes care of balancing the reference counts
+                 */
+                mem_cgroup_uncharge_page(page);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -646,6 +677,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
                         page_clear_dirty(page);
                         set_page_dirty(page);
                 }
+                mem_cgroup_uncharge_page(page);
+
                 __dec_zone_page_state(page,
                         PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
         }
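
For context, here is a minimal sketch of how the widened signatures above are meant to be called. The helper below is hypothetical (it is not part of this patch) and only illustrates the new mem_cont argument:

/*
 * Hypothetical reclaim-side helper, sketched from the signatures in
 * this diff.  Passing NULL for mem_cont keeps the old behaviour:
 * every pte mapping the page is test-and-cleared and counted.
 * Passing a mem_cgroup restricts the count to vmas whose mm belongs
 * to that cgroup, since the new checks in page_referenced_anon() and
 * page_referenced_file() skip every other vma.
 */
static inline int page_was_referenced(struct page *page,
                                      struct mem_cgroup *mem_cont)
{
        /* the page is not locked by this caller, hence is_locked == 0 */
        return page_referenced(page, 0, mem_cont) != 0;
}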