about summary refs log tree commit diff stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  |  43
1 files changed, 43 insertions, 0 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3270ce7375db..128f45c16fa6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -492,6 +492,49 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
492 } 492 }
493 } 493 }
494} 494}
495/*
496 * Returns non-zero if a page (under migration) has valid page_cgroup member.
497 * Refcnt of page_cgroup is incremented.
498 */
499
500int mem_cgroup_prepare_migration(struct page *page)
501{
502 struct page_cgroup *pc;
503 int ret = 0;
504 lock_page_cgroup(page);
505 pc = page_get_page_cgroup(page);
506 if (pc && atomic_inc_not_zero(&pc->ref_cnt))
507 ret = 1;
508 unlock_page_cgroup(page);
509 return ret;
510}
511
/*
 * mem_cgroup_end_migration - drop the extra page_cgroup reference taken
 * by mem_cgroup_prepare_migration().  mem_cgroup_uncharge() handles a
 * NULL page_cgroup itself, so no locking is needed here.
 * NOTE(review): the lockless page_get_page_cgroup() read relies on the
 * extra migration reference — confirm against the uncharge path.
 */
void mem_cgroup_end_migration(struct page *page)
{
	mem_cgroup_uncharge(page_get_page_cgroup(page));
}
517/*
518 * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
519 * And no race with uncharge() routines because page_cgroup for *page*
520 * has extra one reference by mem_cgroup_prepare_migration.
521 */
522
523void mem_cgroup_page_migration(struct page *page, struct page *newpage)
524{
525 struct page_cgroup *pc;
526retry:
527 pc = page_get_page_cgroup(page);
528 if (!pc)
529 return;
530 if (clear_page_cgroup(page, pc) != pc)
531 goto retry;
532 pc->page = newpage;
533 lock_page_cgroup(newpage);
534 page_assign_page_cgroup(newpage, pc);
535 unlock_page_cgroup(newpage);
536 return;
537}
495 538
496int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp) 539int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
497{ 540{