author | Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> | 2011-11-02 16:38:15 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-02 19:06:59 -0400
commit | c0ff4b8540a5c158b8e5bafb7d767298b67b0b92 (patch)
tree | a47a2bcd0b7b80056cde7ba6b1263aae78f77212 /mm
parent | ff7ee93f47151e23601856e7eb5510babf956571 (diff)
memcg: rename mem variable to memcg
The memcg code sometimes uses "struct mem_cgroup *mem" and sometimes uses
"struct mem_cgroup *memcg". Rename all "mem" variables to "memcg" throughout
the source file.
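The change is purely mechanical; as a concrete illustration, the mem_cgroup_css() hunk from the diff below reads, shown here in unified form:

-struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 {
-	return &mem->css;
+	return &memcg->css;
 }

The remaining hunks follow the same pattern, including the prefixed variants: root_mem becomes root_memcg, wake_mem becomes wake_memcg, and oom_wait_mem becomes oom_wait_memcg.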
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 930
1 file changed, 467 insertions, 463 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2d5755544afe..9e38abdbfd95 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -201,8 +201,8 @@ struct mem_cgroup_eventfd_list { | |||
201 | struct eventfd_ctx *eventfd; | 201 | struct eventfd_ctx *eventfd; |
202 | }; | 202 | }; |
203 | 203 | ||
204 | static void mem_cgroup_threshold(struct mem_cgroup *mem); | 204 | static void mem_cgroup_threshold(struct mem_cgroup *memcg); |
205 | static void mem_cgroup_oom_notify(struct mem_cgroup *mem); | 205 | static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * The memory controller data structure. The memory controller controls both | 208 | * The memory controller data structure. The memory controller controls both |
@@ -362,29 +362,29 @@ enum charge_type { | |||
362 | #define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2 | 362 | #define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2 |
363 | #define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT) | 363 | #define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT) |
364 | 364 | ||
365 | static void mem_cgroup_get(struct mem_cgroup *mem); | 365 | static void mem_cgroup_get(struct mem_cgroup *memcg); |
366 | static void mem_cgroup_put(struct mem_cgroup *mem); | 366 | static void mem_cgroup_put(struct mem_cgroup *memcg); |
367 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); | 367 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); |
368 | static void drain_all_stock_async(struct mem_cgroup *mem); | 368 | static void drain_all_stock_async(struct mem_cgroup *memcg); |
369 | 369 | ||
370 | static struct mem_cgroup_per_zone * | 370 | static struct mem_cgroup_per_zone * |
371 | mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) | 371 | mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) |
372 | { | 372 | { |
373 | return &mem->info.nodeinfo[nid]->zoneinfo[zid]; | 373 | return &memcg->info.nodeinfo[nid]->zoneinfo[zid]; |
374 | } | 374 | } |
375 | 375 | ||
376 | struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) | 376 | struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) |
377 | { | 377 | { |
378 | return &mem->css; | 378 | return &memcg->css; |
379 | } | 379 | } |
380 | 380 | ||
381 | static struct mem_cgroup_per_zone * | 381 | static struct mem_cgroup_per_zone * |
382 | page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page) | 382 | page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page) |
383 | { | 383 | { |
384 | int nid = page_to_nid(page); | 384 | int nid = page_to_nid(page); |
385 | int zid = page_zonenum(page); | 385 | int zid = page_zonenum(page); |
386 | 386 | ||
387 | return mem_cgroup_zoneinfo(mem, nid, zid); | 387 | return mem_cgroup_zoneinfo(memcg, nid, zid); |
388 | } | 388 | } |
389 | 389 | ||
390 | static struct mem_cgroup_tree_per_zone * | 390 | static struct mem_cgroup_tree_per_zone * |
@@ -403,7 +403,7 @@ soft_limit_tree_from_page(struct page *page) | |||
403 | } | 403 | } |
404 | 404 | ||
405 | static void | 405 | static void |
406 | __mem_cgroup_insert_exceeded(struct mem_cgroup *mem, | 406 | __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg, |
407 | struct mem_cgroup_per_zone *mz, | 407 | struct mem_cgroup_per_zone *mz, |
408 | struct mem_cgroup_tree_per_zone *mctz, | 408 | struct mem_cgroup_tree_per_zone *mctz, |
409 | unsigned long long new_usage_in_excess) | 409 | unsigned long long new_usage_in_excess) |
@@ -437,7 +437,7 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem, | |||
437 | } | 437 | } |
438 | 438 | ||
439 | static void | 439 | static void |
440 | __mem_cgroup_remove_exceeded(struct mem_cgroup *mem, | 440 | __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, |
441 | struct mem_cgroup_per_zone *mz, | 441 | struct mem_cgroup_per_zone *mz, |
442 | struct mem_cgroup_tree_per_zone *mctz) | 442 | struct mem_cgroup_tree_per_zone *mctz) |
443 | { | 443 | { |
@@ -448,17 +448,17 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem, | |||
448 | } | 448 | } |
449 | 449 | ||
450 | static void | 450 | static void |
451 | mem_cgroup_remove_exceeded(struct mem_cgroup *mem, | 451 | mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, |
452 | struct mem_cgroup_per_zone *mz, | 452 | struct mem_cgroup_per_zone *mz, |
453 | struct mem_cgroup_tree_per_zone *mctz) | 453 | struct mem_cgroup_tree_per_zone *mctz) |
454 | { | 454 | { |
455 | spin_lock(&mctz->lock); | 455 | spin_lock(&mctz->lock); |
456 | __mem_cgroup_remove_exceeded(mem, mz, mctz); | 456 | __mem_cgroup_remove_exceeded(memcg, mz, mctz); |
457 | spin_unlock(&mctz->lock); | 457 | spin_unlock(&mctz->lock); |
458 | } | 458 | } |
459 | 459 | ||
460 | 460 | ||
461 | static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) | 461 | static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) |
462 | { | 462 | { |
463 | unsigned long long excess; | 463 | unsigned long long excess; |
464 | struct mem_cgroup_per_zone *mz; | 464 | struct mem_cgroup_per_zone *mz; |
@@ -471,9 +471,9 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) | |||
471 | * Necessary to update all ancestors when hierarchy is used. | 471 | * Necessary to update all ancestors when hierarchy is used. |
472 | * because their event counter is not touched. | 472 | * because their event counter is not touched. |
473 | */ | 473 | */ |
474 | for (; mem; mem = parent_mem_cgroup(mem)) { | 474 | for (; memcg; memcg = parent_mem_cgroup(memcg)) { |
475 | mz = mem_cgroup_zoneinfo(mem, nid, zid); | 475 | mz = mem_cgroup_zoneinfo(memcg, nid, zid); |
476 | excess = res_counter_soft_limit_excess(&mem->res); | 476 | excess = res_counter_soft_limit_excess(&memcg->res); |
477 | /* | 477 | /* |
478 | * We have to update the tree if mz is on RB-tree or | 478 | * We have to update the tree if mz is on RB-tree or |
479 | * mem is over its softlimit. | 479 | * mem is over its softlimit. |
@@ -482,18 +482,18 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) | |||
482 | spin_lock(&mctz->lock); | 482 | spin_lock(&mctz->lock); |
483 | /* if on-tree, remove it */ | 483 | /* if on-tree, remove it */ |
484 | if (mz->on_tree) | 484 | if (mz->on_tree) |
485 | __mem_cgroup_remove_exceeded(mem, mz, mctz); | 485 | __mem_cgroup_remove_exceeded(memcg, mz, mctz); |
486 | /* | 486 | /* |
487 | * Insert again. mz->usage_in_excess will be updated. | 487 | * Insert again. mz->usage_in_excess will be updated. |
488 | * If excess is 0, no tree ops. | 488 | * If excess is 0, no tree ops. |
489 | */ | 489 | */ |
490 | __mem_cgroup_insert_exceeded(mem, mz, mctz, excess); | 490 | __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess); |
491 | spin_unlock(&mctz->lock); | 491 | spin_unlock(&mctz->lock); |
492 | } | 492 | } |
493 | } | 493 | } |
494 | } | 494 | } |
495 | 495 | ||
496 | static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem) | 496 | static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) |
497 | { | 497 | { |
498 | int node, zone; | 498 | int node, zone; |
499 | struct mem_cgroup_per_zone *mz; | 499 | struct mem_cgroup_per_zone *mz; |
@@ -501,9 +501,9 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem) | |||
501 | 501 | ||
502 | for_each_node_state(node, N_POSSIBLE) { | 502 | for_each_node_state(node, N_POSSIBLE) { |
503 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 503 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
504 | mz = mem_cgroup_zoneinfo(mem, node, zone); | 504 | mz = mem_cgroup_zoneinfo(memcg, node, zone); |
505 | mctz = soft_limit_tree_node_zone(node, zone); | 505 | mctz = soft_limit_tree_node_zone(node, zone); |
506 | mem_cgroup_remove_exceeded(mem, mz, mctz); | 506 | mem_cgroup_remove_exceeded(memcg, mz, mctz); |
507 | } | 507 | } |
508 | } | 508 | } |
509 | } | 509 | } |
@@ -564,7 +564,7 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
564 | * common workload, threashold and synchonization as vmstat[] should be | 564 | * common workload, threashold and synchonization as vmstat[] should be |
565 | * implemented. | 565 | * implemented. |
566 | */ | 566 | */ |
567 | static long mem_cgroup_read_stat(struct mem_cgroup *mem, | 567 | static long mem_cgroup_read_stat(struct mem_cgroup *memcg, |
568 | enum mem_cgroup_stat_index idx) | 568 | enum mem_cgroup_stat_index idx) |
569 | { | 569 | { |
570 | long val = 0; | 570 | long val = 0; |
@@ -572,81 +572,83 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem, | |||
572 | 572 | ||
573 | get_online_cpus(); | 573 | get_online_cpus(); |
574 | for_each_online_cpu(cpu) | 574 | for_each_online_cpu(cpu) |
575 | val += per_cpu(mem->stat->count[idx], cpu); | 575 | val += per_cpu(memcg->stat->count[idx], cpu); |
576 | #ifdef CONFIG_HOTPLUG_CPU | 576 | #ifdef CONFIG_HOTPLUG_CPU |
577 | spin_lock(&mem->pcp_counter_lock); | 577 | spin_lock(&memcg->pcp_counter_lock); |
578 | val += mem->nocpu_base.count[idx]; | 578 | val += memcg->nocpu_base.count[idx]; |
579 | spin_unlock(&mem->pcp_counter_lock); | 579 | spin_unlock(&memcg->pcp_counter_lock); |
580 | #endif | 580 | #endif |
581 | put_online_cpus(); | 581 | put_online_cpus(); |
582 | return val; | 582 | return val; |
583 | } | 583 | } |
584 | 584 | ||
585 | static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, | 585 | static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, |
586 | bool charge) | 586 | bool charge) |
587 | { | 587 | { |
588 | int val = (charge) ? 1 : -1; | 588 | int val = (charge) ? 1 : -1; |
589 | this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); | 589 | this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); |
590 | } | 590 | } |
591 | 591 | ||
592 | void mem_cgroup_pgfault(struct mem_cgroup *mem, int val) | 592 | void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val) |
593 | { | 593 | { |
594 | this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val); | 594 | this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val); |
595 | } | 595 | } |
596 | 596 | ||
597 | void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val) | 597 | void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val) |
598 | { | 598 | { |
599 | this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val); | 599 | this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val); |
600 | } | 600 | } |
601 | 601 | ||
602 | static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, | 602 | static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, |
603 | enum mem_cgroup_events_index idx) | 603 | enum mem_cgroup_events_index idx) |
604 | { | 604 | { |
605 | unsigned long val = 0; | 605 | unsigned long val = 0; |
606 | int cpu; | 606 | int cpu; |
607 | 607 | ||
608 | for_each_online_cpu(cpu) | 608 | for_each_online_cpu(cpu) |
609 | val += per_cpu(mem->stat->events[idx], cpu); | 609 | val += per_cpu(memcg->stat->events[idx], cpu); |
610 | #ifdef CONFIG_HOTPLUG_CPU | 610 | #ifdef CONFIG_HOTPLUG_CPU |
611 | spin_lock(&mem->pcp_counter_lock); | 611 | spin_lock(&memcg->pcp_counter_lock); |
612 | val += mem->nocpu_base.events[idx]; | 612 | val += memcg->nocpu_base.events[idx]; |
613 | spin_unlock(&mem->pcp_counter_lock); | 613 | spin_unlock(&memcg->pcp_counter_lock); |
614 | #endif | 614 | #endif |
615 | return val; | 615 | return val; |
616 | } | 616 | } |
617 | 617 | ||
618 | static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, | 618 | static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, |
619 | bool file, int nr_pages) | 619 | bool file, int nr_pages) |
620 | { | 620 | { |
621 | preempt_disable(); | 621 | preempt_disable(); |
622 | 622 | ||
623 | if (file) | 623 | if (file) |
624 | __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages); | 624 | __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], |
625 | nr_pages); | ||
625 | else | 626 | else |
626 | __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages); | 627 | __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], |
628 | nr_pages); | ||
627 | 629 | ||
628 | /* pagein of a big page is an event. So, ignore page size */ | 630 | /* pagein of a big page is an event. So, ignore page size */ |
629 | if (nr_pages > 0) | 631 | if (nr_pages > 0) |
630 | __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); | 632 | __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); |
631 | else { | 633 | else { |
632 | __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); | 634 | __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); |
633 | nr_pages = -nr_pages; /* for event */ | 635 | nr_pages = -nr_pages; /* for event */ |
634 | } | 636 | } |
635 | 637 | ||
636 | __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); | 638 | __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); |
637 | 639 | ||
638 | preempt_enable(); | 640 | preempt_enable(); |
639 | } | 641 | } |
640 | 642 | ||
641 | unsigned long | 643 | unsigned long |
642 | mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid, | 644 | mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, |
643 | unsigned int lru_mask) | 645 | unsigned int lru_mask) |
644 | { | 646 | { |
645 | struct mem_cgroup_per_zone *mz; | 647 | struct mem_cgroup_per_zone *mz; |
646 | enum lru_list l; | 648 | enum lru_list l; |
647 | unsigned long ret = 0; | 649 | unsigned long ret = 0; |
648 | 650 | ||
649 | mz = mem_cgroup_zoneinfo(mem, nid, zid); | 651 | mz = mem_cgroup_zoneinfo(memcg, nid, zid); |
650 | 652 | ||
651 | for_each_lru(l) { | 653 | for_each_lru(l) { |
652 | if (BIT(l) & lru_mask) | 654 | if (BIT(l) & lru_mask) |
@@ -656,44 +658,45 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid, | |||
656 | } | 658 | } |
657 | 659 | ||
658 | static unsigned long | 660 | static unsigned long |
659 | mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem, | 661 | mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, |
660 | int nid, unsigned int lru_mask) | 662 | int nid, unsigned int lru_mask) |
661 | { | 663 | { |
662 | u64 total = 0; | 664 | u64 total = 0; |
663 | int zid; | 665 | int zid; |
664 | 666 | ||
665 | for (zid = 0; zid < MAX_NR_ZONES; zid++) | 667 | for (zid = 0; zid < MAX_NR_ZONES; zid++) |
666 | total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask); | 668 | total += mem_cgroup_zone_nr_lru_pages(memcg, |
669 | nid, zid, lru_mask); | ||
667 | 670 | ||
668 | return total; | 671 | return total; |
669 | } | 672 | } |
670 | 673 | ||
671 | static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem, | 674 | static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, |
672 | unsigned int lru_mask) | 675 | unsigned int lru_mask) |
673 | { | 676 | { |
674 | int nid; | 677 | int nid; |
675 | u64 total = 0; | 678 | u64 total = 0; |
676 | 679 | ||
677 | for_each_node_state(nid, N_HIGH_MEMORY) | 680 | for_each_node_state(nid, N_HIGH_MEMORY) |
678 | total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask); | 681 | total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); |
679 | return total; | 682 | return total; |
680 | } | 683 | } |
681 | 684 | ||
682 | static bool __memcg_event_check(struct mem_cgroup *mem, int target) | 685 | static bool __memcg_event_check(struct mem_cgroup *memcg, int target) |
683 | { | 686 | { |
684 | unsigned long val, next; | 687 | unsigned long val, next; |
685 | 688 | ||
686 | val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); | 689 | val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); |
687 | next = this_cpu_read(mem->stat->targets[target]); | 690 | next = this_cpu_read(memcg->stat->targets[target]); |
688 | /* from time_after() in jiffies.h */ | 691 | /* from time_after() in jiffies.h */ |
689 | return ((long)next - (long)val < 0); | 692 | return ((long)next - (long)val < 0); |
690 | } | 693 | } |
691 | 694 | ||
692 | static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target) | 695 | static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target) |
693 | { | 696 | { |
694 | unsigned long val, next; | 697 | unsigned long val, next; |
695 | 698 | ||
696 | val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]); | 699 | val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); |
697 | 700 | ||
698 | switch (target) { | 701 | switch (target) { |
699 | case MEM_CGROUP_TARGET_THRESH: | 702 | case MEM_CGROUP_TARGET_THRESH: |
@@ -709,30 +712,30 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target) | |||
709 | return; | 712 | return; |
710 | } | 713 | } |
711 | 714 | ||
712 | this_cpu_write(mem->stat->targets[target], next); | 715 | this_cpu_write(memcg->stat->targets[target], next); |
713 | } | 716 | } |
714 | 717 | ||
715 | /* | 718 | /* |
716 | * Check events in order. | 719 | * Check events in order. |
717 | * | 720 | * |
718 | */ | 721 | */ |
719 | static void memcg_check_events(struct mem_cgroup *mem, struct page *page) | 722 | static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) |
720 | { | 723 | { |
721 | /* threshold event is triggered in finer grain than soft limit */ | 724 | /* threshold event is triggered in finer grain than soft limit */ |
722 | if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) { | 725 | if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) { |
723 | mem_cgroup_threshold(mem); | 726 | mem_cgroup_threshold(memcg); |
724 | __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH); | 727 | __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH); |
725 | if (unlikely(__memcg_event_check(mem, | 728 | if (unlikely(__memcg_event_check(memcg, |
726 | MEM_CGROUP_TARGET_SOFTLIMIT))) { | 729 | MEM_CGROUP_TARGET_SOFTLIMIT))) { |
727 | mem_cgroup_update_tree(mem, page); | 730 | mem_cgroup_update_tree(memcg, page); |
728 | __mem_cgroup_target_update(mem, | 731 | __mem_cgroup_target_update(memcg, |
729 | MEM_CGROUP_TARGET_SOFTLIMIT); | 732 | MEM_CGROUP_TARGET_SOFTLIMIT); |
730 | } | 733 | } |
731 | #if MAX_NUMNODES > 1 | 734 | #if MAX_NUMNODES > 1 |
732 | if (unlikely(__memcg_event_check(mem, | 735 | if (unlikely(__memcg_event_check(memcg, |
733 | MEM_CGROUP_TARGET_NUMAINFO))) { | 736 | MEM_CGROUP_TARGET_NUMAINFO))) { |
734 | atomic_inc(&mem->numainfo_events); | 737 | atomic_inc(&memcg->numainfo_events); |
735 | __mem_cgroup_target_update(mem, | 738 | __mem_cgroup_target_update(memcg, |
736 | MEM_CGROUP_TARGET_NUMAINFO); | 739 | MEM_CGROUP_TARGET_NUMAINFO); |
737 | } | 740 | } |
738 | #endif | 741 | #endif |
@@ -762,7 +765,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) | |||
762 | 765 | ||
763 | struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) | 766 | struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) |
764 | { | 767 | { |
765 | struct mem_cgroup *mem = NULL; | 768 | struct mem_cgroup *memcg = NULL; |
766 | 769 | ||
767 | if (!mm) | 770 | if (!mm) |
768 | return NULL; | 771 | return NULL; |
@@ -773,25 +776,25 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) | |||
773 | */ | 776 | */ |
774 | rcu_read_lock(); | 777 | rcu_read_lock(); |
775 | do { | 778 | do { |
776 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 779 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
777 | if (unlikely(!mem)) | 780 | if (unlikely(!memcg)) |
778 | break; | 781 | break; |
779 | } while (!css_tryget(&mem->css)); | 782 | } while (!css_tryget(&memcg->css)); |
780 | rcu_read_unlock(); | 783 | rcu_read_unlock(); |
781 | return mem; | 784 | return memcg; |
782 | } | 785 | } |
783 | 786 | ||
784 | /* The caller has to guarantee "mem" exists before calling this */ | 787 | /* The caller has to guarantee "mem" exists before calling this */ |
785 | static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) | 788 | static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *memcg) |
786 | { | 789 | { |
787 | struct cgroup_subsys_state *css; | 790 | struct cgroup_subsys_state *css; |
788 | int found; | 791 | int found; |
789 | 792 | ||
790 | if (!mem) /* ROOT cgroup has the smallest ID */ | 793 | if (!memcg) /* ROOT cgroup has the smallest ID */ |
791 | return root_mem_cgroup; /*css_put/get against root is ignored*/ | 794 | return root_mem_cgroup; /*css_put/get against root is ignored*/ |
792 | if (!mem->use_hierarchy) { | 795 | if (!memcg->use_hierarchy) { |
793 | if (css_tryget(&mem->css)) | 796 | if (css_tryget(&memcg->css)) |
794 | return mem; | 797 | return memcg; |
795 | return NULL; | 798 | return NULL; |
796 | } | 799 | } |
797 | rcu_read_lock(); | 800 | rcu_read_lock(); |
@@ -799,13 +802,13 @@ static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) | |||
799 | * searching a memory cgroup which has the smallest ID under given | 802 | * searching a memory cgroup which has the smallest ID under given |
800 | * ROOT cgroup. (ID >= 1) | 803 | * ROOT cgroup. (ID >= 1) |
801 | */ | 804 | */ |
802 | css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found); | 805 | css = css_get_next(&mem_cgroup_subsys, 1, &memcg->css, &found); |
803 | if (css && css_tryget(css)) | 806 | if (css && css_tryget(css)) |
804 | mem = container_of(css, struct mem_cgroup, css); | 807 | memcg = container_of(css, struct mem_cgroup, css); |
805 | else | 808 | else |
806 | mem = NULL; | 809 | memcg = NULL; |
807 | rcu_read_unlock(); | 810 | rcu_read_unlock(); |
808 | return mem; | 811 | return memcg; |
809 | } | 812 | } |
810 | 813 | ||
811 | static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, | 814 | static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, |
@@ -859,29 +862,29 @@ static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, | |||
859 | for_each_mem_cgroup_tree_cond(iter, NULL, true) | 862 | for_each_mem_cgroup_tree_cond(iter, NULL, true) |
860 | 863 | ||
861 | 864 | ||
862 | static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) | 865 | static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) |
863 | { | 866 | { |
864 | return (mem == root_mem_cgroup); | 867 | return (memcg == root_mem_cgroup); |
865 | } | 868 | } |
866 | 869 | ||
867 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) | 870 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) |
868 | { | 871 | { |
869 | struct mem_cgroup *mem; | 872 | struct mem_cgroup *memcg; |
870 | 873 | ||
871 | if (!mm) | 874 | if (!mm) |
872 | return; | 875 | return; |
873 | 876 | ||
874 | rcu_read_lock(); | 877 | rcu_read_lock(); |
875 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 878 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
876 | if (unlikely(!mem)) | 879 | if (unlikely(!memcg)) |
877 | goto out; | 880 | goto out; |
878 | 881 | ||
879 | switch (idx) { | 882 | switch (idx) { |
880 | case PGMAJFAULT: | 883 | case PGMAJFAULT: |
881 | mem_cgroup_pgmajfault(mem, 1); | 884 | mem_cgroup_pgmajfault(memcg, 1); |
882 | break; | 885 | break; |
883 | case PGFAULT: | 886 | case PGFAULT: |
884 | mem_cgroup_pgfault(mem, 1); | 887 | mem_cgroup_pgfault(memcg, 1); |
885 | break; | 888 | break; |
886 | default: | 889 | default: |
887 | BUG(); | 890 | BUG(); |
@@ -1063,21 +1066,21 @@ void mem_cgroup_move_lists(struct page *page, | |||
1063 | } | 1066 | } |
1064 | 1067 | ||
1065 | /* | 1068 | /* |
1066 | * Checks whether given mem is same or in the root_mem's | 1069 | * Checks whether given mem is same or in the root_mem_cgroup's |
1067 | * hierarchy subtree | 1070 | * hierarchy subtree |
1068 | */ | 1071 | */ |
1069 | static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem, | 1072 | static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, |
1070 | struct mem_cgroup *mem) | 1073 | struct mem_cgroup *memcg) |
1071 | { | 1074 | { |
1072 | if (root_mem != mem) { | 1075 | if (root_memcg != memcg) { |
1073 | return (root_mem->use_hierarchy && | 1076 | return (root_memcg->use_hierarchy && |
1074 | css_is_ancestor(&mem->css, &root_mem->css)); | 1077 | css_is_ancestor(&memcg->css, &root_memcg->css)); |
1075 | } | 1078 | } |
1076 | 1079 | ||
1077 | return true; | 1080 | return true; |
1078 | } | 1081 | } |
1079 | 1082 | ||
1080 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) | 1083 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) |
1081 | { | 1084 | { |
1082 | int ret; | 1085 | int ret; |
1083 | struct mem_cgroup *curr = NULL; | 1086 | struct mem_cgroup *curr = NULL; |
@@ -1091,12 +1094,12 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) | |||
1091 | if (!curr) | 1094 | if (!curr) |
1092 | return 0; | 1095 | return 0; |
1093 | /* | 1096 | /* |
1094 | * We should check use_hierarchy of "mem" not "curr". Because checking | 1097 | * We should check use_hierarchy of "memcg" not "curr". Because checking |
1095 | * use_hierarchy of "curr" here make this function true if hierarchy is | 1098 | * use_hierarchy of "curr" here make this function true if hierarchy is |
1096 | * enabled in "curr" and "curr" is a child of "mem" in *cgroup* | 1099 | * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* |
1097 | * hierarchy(even if use_hierarchy is disabled in "mem"). | 1100 | * hierarchy(even if use_hierarchy is disabled in "memcg"). |
1098 | */ | 1101 | */ |
1099 | ret = mem_cgroup_same_or_subtree(mem, curr); | 1102 | ret = mem_cgroup_same_or_subtree(memcg, curr); |
1100 | css_put(&curr->css); | 1103 | css_put(&curr->css); |
1101 | return ret; | 1104 | return ret; |
1102 | } | 1105 | } |
@@ -1254,13 +1257,13 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | |||
1254 | * Returns the maximum amount of memory @mem can be charged with, in | 1257 | * Returns the maximum amount of memory @mem can be charged with, in |
1255 | * pages. | 1258 | * pages. |
1256 | */ | 1259 | */ |
1257 | static unsigned long mem_cgroup_margin(struct mem_cgroup *mem) | 1260 | static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) |
1258 | { | 1261 | { |
1259 | unsigned long long margin; | 1262 | unsigned long long margin; |
1260 | 1263 | ||
1261 | margin = res_counter_margin(&mem->res); | 1264 | margin = res_counter_margin(&memcg->res); |
1262 | if (do_swap_account) | 1265 | if (do_swap_account) |
1263 | margin = min(margin, res_counter_margin(&mem->memsw)); | 1266 | margin = min(margin, res_counter_margin(&memcg->memsw)); |
1264 | return margin >> PAGE_SHIFT; | 1267 | return margin >> PAGE_SHIFT; |
1265 | } | 1268 | } |
1266 | 1269 | ||
@@ -1275,33 +1278,33 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg) | |||
1275 | return memcg->swappiness; | 1278 | return memcg->swappiness; |
1276 | } | 1279 | } |
1277 | 1280 | ||
1278 | static void mem_cgroup_start_move(struct mem_cgroup *mem) | 1281 | static void mem_cgroup_start_move(struct mem_cgroup *memcg) |
1279 | { | 1282 | { |
1280 | int cpu; | 1283 | int cpu; |
1281 | 1284 | ||
1282 | get_online_cpus(); | 1285 | get_online_cpus(); |
1283 | spin_lock(&mem->pcp_counter_lock); | 1286 | spin_lock(&memcg->pcp_counter_lock); |
1284 | for_each_online_cpu(cpu) | 1287 | for_each_online_cpu(cpu) |
1285 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; | 1288 | per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; |
1286 | mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; | 1289 | memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; |
1287 | spin_unlock(&mem->pcp_counter_lock); | 1290 | spin_unlock(&memcg->pcp_counter_lock); |
1288 | put_online_cpus(); | 1291 | put_online_cpus(); |
1289 | 1292 | ||
1290 | synchronize_rcu(); | 1293 | synchronize_rcu(); |
1291 | } | 1294 | } |
1292 | 1295 | ||
1293 | static void mem_cgroup_end_move(struct mem_cgroup *mem) | 1296 | static void mem_cgroup_end_move(struct mem_cgroup *memcg) |
1294 | { | 1297 | { |
1295 | int cpu; | 1298 | int cpu; |
1296 | 1299 | ||
1297 | if (!mem) | 1300 | if (!memcg) |
1298 | return; | 1301 | return; |
1299 | get_online_cpus(); | 1302 | get_online_cpus(); |
1300 | spin_lock(&mem->pcp_counter_lock); | 1303 | spin_lock(&memcg->pcp_counter_lock); |
1301 | for_each_online_cpu(cpu) | 1304 | for_each_online_cpu(cpu) |
1302 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; | 1305 | per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; |
1303 | mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; | 1306 | memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; |
1304 | spin_unlock(&mem->pcp_counter_lock); | 1307 | spin_unlock(&memcg->pcp_counter_lock); |
1305 | put_online_cpus(); | 1308 | put_online_cpus(); |
1306 | } | 1309 | } |
1307 | /* | 1310 | /* |
@@ -1316,13 +1319,13 @@ static void mem_cgroup_end_move(struct mem_cgroup *mem) | |||
1316 | * waiting at hith-memory prressure caused by "move". | 1319 | * waiting at hith-memory prressure caused by "move". |
1317 | */ | 1320 | */ |
1318 | 1321 | ||
1319 | static bool mem_cgroup_stealed(struct mem_cgroup *mem) | 1322 | static bool mem_cgroup_stealed(struct mem_cgroup *memcg) |
1320 | { | 1323 | { |
1321 | VM_BUG_ON(!rcu_read_lock_held()); | 1324 | VM_BUG_ON(!rcu_read_lock_held()); |
1322 | return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0; | 1325 | return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0; |
1323 | } | 1326 | } |
1324 | 1327 | ||
1325 | static bool mem_cgroup_under_move(struct mem_cgroup *mem) | 1328 | static bool mem_cgroup_under_move(struct mem_cgroup *memcg) |
1326 | { | 1329 | { |
1327 | struct mem_cgroup *from; | 1330 | struct mem_cgroup *from; |
1328 | struct mem_cgroup *to; | 1331 | struct mem_cgroup *to; |
@@ -1337,17 +1340,17 @@ static bool mem_cgroup_under_move(struct mem_cgroup *mem) | |||
1337 | if (!from) | 1340 | if (!from) |
1338 | goto unlock; | 1341 | goto unlock; |
1339 | 1342 | ||
1340 | ret = mem_cgroup_same_or_subtree(mem, from) | 1343 | ret = mem_cgroup_same_or_subtree(memcg, from) |
1341 | || mem_cgroup_same_or_subtree(mem, to); | 1344 | || mem_cgroup_same_or_subtree(memcg, to); |
1342 | unlock: | 1345 | unlock: |
1343 | spin_unlock(&mc.lock); | 1346 | spin_unlock(&mc.lock); |
1344 | return ret; | 1347 | return ret; |
1345 | } | 1348 | } |
1346 | 1349 | ||
1347 | static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) | 1350 | static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) |
1348 | { | 1351 | { |
1349 | if (mc.moving_task && current != mc.moving_task) { | 1352 | if (mc.moving_task && current != mc.moving_task) { |
1350 | if (mem_cgroup_under_move(mem)) { | 1353 | if (mem_cgroup_under_move(memcg)) { |
1351 | DEFINE_WAIT(wait); | 1354 | DEFINE_WAIT(wait); |
1352 | prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); | 1355 | prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); |
1353 | /* moving charge context might have finished. */ | 1356 | /* moving charge context might have finished. */ |
@@ -1431,12 +1434,12 @@ done: | |||
1431 | * This function returns the number of memcg under hierarchy tree. Returns | 1434 | * This function returns the number of memcg under hierarchy tree. Returns |
1432 | * 1(self count) if no children. | 1435 | * 1(self count) if no children. |
1433 | */ | 1436 | */ |
1434 | static int mem_cgroup_count_children(struct mem_cgroup *mem) | 1437 | static int mem_cgroup_count_children(struct mem_cgroup *memcg) |
1435 | { | 1438 | { |
1436 | int num = 0; | 1439 | int num = 0; |
1437 | struct mem_cgroup *iter; | 1440 | struct mem_cgroup *iter; |
1438 | 1441 | ||
1439 | for_each_mem_cgroup_tree(iter, mem) | 1442 | for_each_mem_cgroup_tree(iter, memcg) |
1440 | num++; | 1443 | num++; |
1441 | return num; | 1444 | return num; |
1442 | } | 1445 | } |
@@ -1466,21 +1469,21 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) | |||
1466 | * that to reclaim free pages from. | 1469 | * that to reclaim free pages from. |
1467 | */ | 1470 | */ |
1468 | static struct mem_cgroup * | 1471 | static struct mem_cgroup * |
1469 | mem_cgroup_select_victim(struct mem_cgroup *root_mem) | 1472 | mem_cgroup_select_victim(struct mem_cgroup *root_memcg) |
1470 | { | 1473 | { |
1471 | struct mem_cgroup *ret = NULL; | 1474 | struct mem_cgroup *ret = NULL; |
1472 | struct cgroup_subsys_state *css; | 1475 | struct cgroup_subsys_state *css; |
1473 | int nextid, found; | 1476 | int nextid, found; |
1474 | 1477 | ||
1475 | if (!root_mem->use_hierarchy) { | 1478 | if (!root_memcg->use_hierarchy) { |
1476 | css_get(&root_mem->css); | 1479 | css_get(&root_memcg->css); |
1477 | ret = root_mem; | 1480 | ret = root_memcg; |
1478 | } | 1481 | } |
1479 | 1482 | ||
1480 | while (!ret) { | 1483 | while (!ret) { |
1481 | rcu_read_lock(); | 1484 | rcu_read_lock(); |
1482 | nextid = root_mem->last_scanned_child + 1; | 1485 | nextid = root_memcg->last_scanned_child + 1; |
1483 | css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css, | 1486 | css = css_get_next(&mem_cgroup_subsys, nextid, &root_memcg->css, |
1484 | &found); | 1487 | &found); |
1485 | if (css && css_tryget(css)) | 1488 | if (css && css_tryget(css)) |
1486 | ret = container_of(css, struct mem_cgroup, css); | 1489 | ret = container_of(css, struct mem_cgroup, css); |
@@ -1489,9 +1492,9 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem) | |||
1489 | /* Updates scanning parameter */ | 1492 | /* Updates scanning parameter */ |
1490 | if (!css) { | 1493 | if (!css) { |
1491 | /* this means start scan from ID:1 */ | 1494 | /* this means start scan from ID:1 */ |
1492 | root_mem->last_scanned_child = 0; | 1495 | root_memcg->last_scanned_child = 0; |
1493 | } else | 1496 | } else |
1494 | root_mem->last_scanned_child = found; | 1497 | root_memcg->last_scanned_child = found; |
1495 | } | 1498 | } |
1496 | 1499 | ||
1497 | return ret; | 1500 | return ret; |
@@ -1507,14 +1510,14 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem) | |||
1507 | * reclaimable pages on a node. Returns true if there are any reclaimable | 1510 | * reclaimable pages on a node. Returns true if there are any reclaimable |
1508 | * pages in the node. | 1511 | * pages in the node. |
1509 | */ | 1512 | */ |
1510 | static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem, | 1513 | static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, |
1511 | int nid, bool noswap) | 1514 | int nid, bool noswap) |
1512 | { | 1515 | { |
1513 | if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE)) | 1516 | if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) |
1514 | return true; | 1517 | return true; |
1515 | if (noswap || !total_swap_pages) | 1518 | if (noswap || !total_swap_pages) |
1516 | return false; | 1519 | return false; |
1517 | if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON)) | 1520 | if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) |
1518 | return true; | 1521 | return true; |
1519 | return false; | 1522 | return false; |
1520 | 1523 | ||
@@ -1527,29 +1530,29 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem, | |||
1527 | * nodes based on the zonelist. So update the list loosely once per 10 secs. | 1530 | * nodes based on the zonelist. So update the list loosely once per 10 secs. |
1528 | * | 1531 | * |
1529 | */ | 1532 | */ |
1530 | static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) | 1533 | static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) |
1531 | { | 1534 | { |
1532 | int nid; | 1535 | int nid; |
1533 | /* | 1536 | /* |
1534 | * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET | 1537 | * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET |
1535 | * pagein/pageout changes since the last update. | 1538 | * pagein/pageout changes since the last update. |
1536 | */ | 1539 | */ |
1537 | if (!atomic_read(&mem->numainfo_events)) | 1540 | if (!atomic_read(&memcg->numainfo_events)) |
1538 | return; | 1541 | return; |
1539 | if (atomic_inc_return(&mem->numainfo_updating) > 1) | 1542 | if (atomic_inc_return(&memcg->numainfo_updating) > 1) |
1540 | return; | 1543 | return; |
1541 | 1544 | ||
1542 | /* make a nodemask where this memcg uses memory from */ | 1545 | /* make a nodemask where this memcg uses memory from */ |
1543 | mem->scan_nodes = node_states[N_HIGH_MEMORY]; | 1546 | memcg->scan_nodes = node_states[N_HIGH_MEMORY]; |
1544 | 1547 | ||
1545 | for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { | 1548 | for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { |
1546 | 1549 | ||
1547 | if (!test_mem_cgroup_node_reclaimable(mem, nid, false)) | 1550 | if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) |
1548 | node_clear(nid, mem->scan_nodes); | 1551 | node_clear(nid, memcg->scan_nodes); |
1549 | } | 1552 | } |
1550 | 1553 | ||
1551 | atomic_set(&mem->numainfo_events, 0); | 1554 | atomic_set(&memcg->numainfo_events, 0); |
1552 | atomic_set(&mem->numainfo_updating, 0); | 1555 | atomic_set(&memcg->numainfo_updating, 0); |
1553 | } | 1556 | } |
1554 | 1557 | ||
1555 | /* | 1558 | /* |
@@ -1564,16 +1567,16 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) | |||
1564 | * | 1567 | * |
1565 | * Now, we use round-robin. Better algorithm is welcomed. | 1568 | * Now, we use round-robin. Better algorithm is welcomed. |
1566 | */ | 1569 | */ |
1567 | int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | 1570 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) |
1568 | { | 1571 | { |
1569 | int node; | 1572 | int node; |
1570 | 1573 | ||
1571 | mem_cgroup_may_update_nodemask(mem); | 1574 | mem_cgroup_may_update_nodemask(memcg); |
1572 | node = mem->last_scanned_node; | 1575 | node = memcg->last_scanned_node; |
1573 | 1576 | ||
1574 | node = next_node(node, mem->scan_nodes); | 1577 | node = next_node(node, memcg->scan_nodes); |
1575 | if (node == MAX_NUMNODES) | 1578 | if (node == MAX_NUMNODES) |
1576 | node = first_node(mem->scan_nodes); | 1579 | node = first_node(memcg->scan_nodes); |
1577 | /* | 1580 | /* |
1578 | * We call this when we hit limit, not when pages are added to LRU. | 1581 | * We call this when we hit limit, not when pages are added to LRU. |
1579 | * No LRU may hold pages because all pages are UNEVICTABLE or | 1582 | * No LRU may hold pages because all pages are UNEVICTABLE or |
@@ -1583,7 +1586,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | |||
1583 | if (unlikely(node == MAX_NUMNODES)) | 1586 | if (unlikely(node == MAX_NUMNODES)) |
1584 | node = numa_node_id(); | 1587 | node = numa_node_id(); |
1585 | 1588 | ||
1586 | mem->last_scanned_node = node; | 1589 | memcg->last_scanned_node = node; |
1587 | return node; | 1590 | return node; |
1588 | } | 1591 | } |
1589 | 1592 | ||
@@ -1593,7 +1596,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | |||
1593 | * unused nodes. But scan_nodes is lazily updated and may not cotain | 1596 | * unused nodes. But scan_nodes is lazily updated and may not cotain |
1594 | * enough new information. We need to do double check. | 1597 | * enough new information. We need to do double check. |
1595 | */ | 1598 | */ |
1596 | bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) | 1599 | bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) |
1597 | { | 1600 | { |
1598 | int nid; | 1601 | int nid; |
1599 | 1602 | ||
@@ -1601,12 +1604,12 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) | |||
1601 | * quick check...making use of scan_node. | 1604 | * quick check...making use of scan_node. |
1602 | * We can skip unused nodes. | 1605 | * We can skip unused nodes. |
1603 | */ | 1606 | */ |
1604 | if (!nodes_empty(mem->scan_nodes)) { | 1607 | if (!nodes_empty(memcg->scan_nodes)) { |
1605 | for (nid = first_node(mem->scan_nodes); | 1608 | for (nid = first_node(memcg->scan_nodes); |
1606 | nid < MAX_NUMNODES; | 1609 | nid < MAX_NUMNODES; |
1607 | nid = next_node(nid, mem->scan_nodes)) { | 1610 | nid = next_node(nid, memcg->scan_nodes)) { |
1608 | 1611 | ||
1609 | if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) | 1612 | if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) |
1610 | return true; | 1613 | return true; |
1611 | } | 1614 | } |
1612 | } | 1615 | } |
@@ -1614,23 +1617,23 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) | |||
1614 | * Check rest of nodes. | 1617 | * Check rest of nodes. |
1615 | */ | 1618 | */ |
1616 | for_each_node_state(nid, N_HIGH_MEMORY) { | 1619 | for_each_node_state(nid, N_HIGH_MEMORY) { |
1617 | if (node_isset(nid, mem->scan_nodes)) | 1620 | if (node_isset(nid, memcg->scan_nodes)) |
1618 | continue; | 1621 | continue; |
1619 | if (test_mem_cgroup_node_reclaimable(mem, nid, noswap)) | 1622 | if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) |
1620 | return true; | 1623 | return true; |
1621 | } | 1624 | } |
1622 | return false; | 1625 | return false; |
1623 | } | 1626 | } |
1624 | 1627 | ||
1625 | #else | 1628 | #else |
1626 | int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | 1629 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) |
1627 | { | 1630 | { |
1628 | return 0; | 1631 | return 0; |
1629 | } | 1632 | } |
1630 | 1633 | ||
1631 | bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) | 1634 | bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) |
1632 | { | 1635 | { |
1633 | return test_mem_cgroup_node_reclaimable(mem, 0, noswap); | 1636 | return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); |
1634 | } | 1637 | } |
1635 | #endif | 1638 | #endif |
1636 | 1639 | ||
@@ -1639,14 +1642,14 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) | |||
1639 | * we reclaimed from, so that we don't end up penalizing one child extensively | 1642 | * we reclaimed from, so that we don't end up penalizing one child extensively |
1640 | * based on its position in the children list. | 1643 | * based on its position in the children list. |
1641 | * | 1644 | * |
1642 | * root_mem is the original ancestor that we've been reclaim from. | 1645 | * root_memcg is the original ancestor that we've been reclaim from. |
1643 | * | 1646 | * |
1644 | * We give up and return to the caller when we visit root_mem twice. | 1647 | * We give up and return to the caller when we visit root_memcg twice. |
1645 | * (other groups can be removed while we're walking....) | 1648 | * (other groups can be removed while we're walking....) |
1646 | * | 1649 | * |
1647 | * If shrink==true, for avoiding to free too much, this returns immedieately. | 1650 | * If shrink==true, for avoiding to free too much, this returns immedieately. |
1648 | */ | 1651 | */ |
1649 | static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | 1652 | static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_memcg, |
1650 | struct zone *zone, | 1653 | struct zone *zone, |
1651 | gfp_t gfp_mask, | 1654 | gfp_t gfp_mask, |
1652 | unsigned long reclaim_options, | 1655 | unsigned long reclaim_options, |
@@ -1661,15 +1664,15 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1661 | unsigned long excess; | 1664 | unsigned long excess; |
1662 | unsigned long nr_scanned; | 1665 | unsigned long nr_scanned; |
1663 | 1666 | ||
1664 | excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; | 1667 | excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; |
1665 | 1668 | ||
1666 | /* If memsw_is_minimum==1, swap-out is of-no-use. */ | 1669 | /* If memsw_is_minimum==1, swap-out is of-no-use. */ |
1667 | if (!check_soft && !shrink && root_mem->memsw_is_minimum) | 1670 | if (!check_soft && !shrink && root_memcg->memsw_is_minimum) |
1668 | noswap = true; | 1671 | noswap = true; |
1669 | 1672 | ||
1670 | while (1) { | 1673 | while (1) { |
1671 | victim = mem_cgroup_select_victim(root_mem); | 1674 | victim = mem_cgroup_select_victim(root_memcg); |
1672 | if (victim == root_mem) { | 1675 | if (victim == root_memcg) { |
1673 | loop++; | 1676 | loop++; |
1674 | /* | 1677 | /* |
1675 | * We are not draining per cpu cached charges during | 1678 | * We are not draining per cpu cached charges during |
@@ -1678,7 +1681,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1678 | * charges will not give any. | 1681 | * charges will not give any. |
1679 | */ | 1682 | */ |
1680 | if (!check_soft && loop >= 1) | 1683 | if (!check_soft && loop >= 1) |
1681 | drain_all_stock_async(root_mem); | 1684 | drain_all_stock_async(root_memcg); |
1682 | if (loop >= 2) { | 1685 | if (loop >= 2) { |
1683 | /* | 1686 | /* |
1684 | * If we have not been able to reclaim | 1687 | * If we have not been able to reclaim |
@@ -1725,9 +1728,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1725 | return ret; | 1728 | return ret; |
1726 | total += ret; | 1729 | total += ret; |
1727 | if (check_soft) { | 1730 | if (check_soft) { |
1728 | if (!res_counter_soft_limit_excess(&root_mem->res)) | 1731 | if (!res_counter_soft_limit_excess(&root_memcg->res)) |
1729 | return total; | 1732 | return total; |
1730 | } else if (mem_cgroup_margin(root_mem)) | 1733 | } else if (mem_cgroup_margin(root_memcg)) |
1731 | return total; | 1734 | return total; |
1732 | } | 1735 | } |
1733 | return total; | 1736 | return total; |
@@ -1738,12 +1741,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1738 | * If someone is running, return false. | 1741 | * If someone is running, return false. |
1739 | * Has to be called with memcg_oom_lock | 1742 | * Has to be called with memcg_oom_lock |
1740 | */ | 1743 | */ |
1741 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | 1744 | static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg) |
1742 | { | 1745 | { |
1743 | struct mem_cgroup *iter, *failed = NULL; | 1746 | struct mem_cgroup *iter, *failed = NULL; |
1744 | bool cond = true; | 1747 | bool cond = true; |
1745 | 1748 | ||
1746 | for_each_mem_cgroup_tree_cond(iter, mem, cond) { | 1749 | for_each_mem_cgroup_tree_cond(iter, memcg, cond) { |
1747 | if (iter->oom_lock) { | 1750 | if (iter->oom_lock) { |
1748 | /* | 1751 | /* |
1749 | * this subtree of our hierarchy is already locked | 1752 | * this subtree of our hierarchy is already locked |
@@ -1763,7 +1766,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | |||
1763 | * what we set up to the failing subtree | 1766 | * what we set up to the failing subtree |
1764 | */ | 1767 | */ |
1765 | cond = true; | 1768 | cond = true; |
1766 | for_each_mem_cgroup_tree_cond(iter, mem, cond) { | 1769 | for_each_mem_cgroup_tree_cond(iter, memcg, cond) { |
1767 | if (iter == failed) { | 1770 | if (iter == failed) { |
1768 | cond = false; | 1771 | cond = false; |
1769 | continue; | 1772 | continue; |
@@ -1776,24 +1779,24 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | |||
1776 | /* | 1779 | /* |
1777 | * Has to be called with memcg_oom_lock | 1780 | * Has to be called with memcg_oom_lock |
1778 | */ | 1781 | */ |
1779 | static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) | 1782 | static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg) |
1780 | { | 1783 | { |
1781 | struct mem_cgroup *iter; | 1784 | struct mem_cgroup *iter; |
1782 | 1785 | ||
1783 | for_each_mem_cgroup_tree(iter, mem) | 1786 | for_each_mem_cgroup_tree(iter, memcg) |
1784 | iter->oom_lock = false; | 1787 | iter->oom_lock = false; |
1785 | return 0; | 1788 | return 0; |
1786 | } | 1789 | } |
1787 | 1790 | ||
1788 | static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem) | 1791 | static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) |
1789 | { | 1792 | { |
1790 | struct mem_cgroup *iter; | 1793 | struct mem_cgroup *iter; |
1791 | 1794 | ||
1792 | for_each_mem_cgroup_tree(iter, mem) | 1795 | for_each_mem_cgroup_tree(iter, memcg) |
1793 | atomic_inc(&iter->under_oom); | 1796 | atomic_inc(&iter->under_oom); |
1794 | } | 1797 | } |
1795 | 1798 | ||
1796 | static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) | 1799 | static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) |
1797 | { | 1800 | { |
1798 | struct mem_cgroup *iter; | 1801 | struct mem_cgroup *iter; |
1799 | 1802 | ||
@@ -1802,7 +1805,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) | |||
1802 | * mem_cgroup_oom_lock() may not be called. We have to use | 1805 | * mem_cgroup_oom_lock() may not be called. We have to use |
1803 | * atomic_add_unless() here. | 1806 | * atomic_add_unless() here. |
1804 | */ | 1807 | */ |
1805 | for_each_mem_cgroup_tree(iter, mem) | 1808 | for_each_mem_cgroup_tree(iter, memcg) |
1806 | atomic_add_unless(&iter->under_oom, -1, 0); | 1809 | atomic_add_unless(&iter->under_oom, -1, 0); |
1807 | } | 1810 | } |
1808 | 1811 | ||
@@ -1817,80 +1820,80 @@ struct oom_wait_info { | |||
1817 | static int memcg_oom_wake_function(wait_queue_t *wait, | 1820 | static int memcg_oom_wake_function(wait_queue_t *wait, |
1818 | unsigned mode, int sync, void *arg) | 1821 | unsigned mode, int sync, void *arg) |
1819 | { | 1822 | { |
1820 | struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg, | 1823 | struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg, |
1821 | *oom_wait_mem; | 1824 | *oom_wait_memcg; |
1822 | struct oom_wait_info *oom_wait_info; | 1825 | struct oom_wait_info *oom_wait_info; |
1823 | 1826 | ||
1824 | oom_wait_info = container_of(wait, struct oom_wait_info, wait); | 1827 | oom_wait_info = container_of(wait, struct oom_wait_info, wait); |
1825 | oom_wait_mem = oom_wait_info->mem; | 1828 | oom_wait_memcg = oom_wait_info->mem; |
1826 | 1829 | ||
1827 | /* | 1830 | /* |
1828 | * Both of oom_wait_info->mem and wake_mem are stable under us. | 1831 | * Both of oom_wait_info->mem and wake_mem are stable under us. |
1829 | * Then we can use css_is_ancestor without taking care of RCU. | 1832 | * Then we can use css_is_ancestor without taking care of RCU. |
1830 | */ | 1833 | */ |
1831 | if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem) | 1834 | if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) |
1832 | && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem)) | 1835 | && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) |
1833 | return 0; | 1836 | return 0; |
1834 | return autoremove_wake_function(wait, mode, sync, arg); | 1837 | return autoremove_wake_function(wait, mode, sync, arg); |
1835 | } | 1838 | } |
1836 | 1839 | ||
1837 | static void memcg_wakeup_oom(struct mem_cgroup *mem) | 1840 | static void memcg_wakeup_oom(struct mem_cgroup *memcg) |
1838 | { | 1841 | { |
1839 | /* for filtering, pass "mem" as argument. */ | 1842 | /* for filtering, pass "memcg" as argument. */ |
1840 | __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); | 1843 | __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); |
1841 | } | 1844 | } |
1842 | 1845 | ||
1843 | static void memcg_oom_recover(struct mem_cgroup *mem) | 1846 | static void memcg_oom_recover(struct mem_cgroup *memcg) |
1844 | { | 1847 | { |
1845 | if (mem && atomic_read(&mem->under_oom)) | 1848 | if (memcg && atomic_read(&memcg->under_oom)) |
1846 | memcg_wakeup_oom(mem); | 1849 | memcg_wakeup_oom(memcg); |
1847 | } | 1850 | } |
1848 | 1851 | ||
1849 | /* | 1852 | /* |
1850 | * try to call OOM killer. returns false if we should exit memory-reclaim loop. | 1853 | * try to call OOM killer. returns false if we should exit memory-reclaim loop. |
1851 | */ | 1854 | */ |
1852 | bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | 1855 | bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask) |
1853 | { | 1856 | { |
1854 | struct oom_wait_info owait; | 1857 | struct oom_wait_info owait; |
1855 | bool locked, need_to_kill; | 1858 | bool locked, need_to_kill; |
1856 | 1859 | ||
1857 | owait.mem = mem; | 1860 | owait.mem = memcg; |
1858 | owait.wait.flags = 0; | 1861 | owait.wait.flags = 0; |
1859 | owait.wait.func = memcg_oom_wake_function; | 1862 | owait.wait.func = memcg_oom_wake_function; |
1860 | owait.wait.private = current; | 1863 | owait.wait.private = current; |
1861 | INIT_LIST_HEAD(&owait.wait.task_list); | 1864 | INIT_LIST_HEAD(&owait.wait.task_list); |
1862 | need_to_kill = true; | 1865 | need_to_kill = true; |
1863 | mem_cgroup_mark_under_oom(mem); | 1866 | mem_cgroup_mark_under_oom(memcg); |
1864 | 1867 | ||
1865 | /* At first, try to OOM lock hierarchy under mem.*/ | 1868 | /* At first, try to OOM lock hierarchy under memcg.*/ |
1866 | spin_lock(&memcg_oom_lock); | 1869 | spin_lock(&memcg_oom_lock); |
1867 | locked = mem_cgroup_oom_lock(mem); | 1870 | locked = mem_cgroup_oom_lock(memcg); |
1868 | /* | 1871 | /* |
1869 | * Even if signal_pending(), we can't quit charge() loop without | 1872 | * Even if signal_pending(), we can't quit charge() loop without |
1870 | * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL | 1873 | * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL |
1871 | * under OOM is always welcomed, use TASK_KILLABLE here. | 1874 | * under OOM is always welcomed, use TASK_KILLABLE here. |
1872 | */ | 1875 | */ |
1873 | prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); | 1876 | prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); |
1874 | if (!locked || mem->oom_kill_disable) | 1877 | if (!locked || memcg->oom_kill_disable) |
1875 | need_to_kill = false; | 1878 | need_to_kill = false; |
1876 | if (locked) | 1879 | if (locked) |
1877 | mem_cgroup_oom_notify(mem); | 1880 | mem_cgroup_oom_notify(memcg); |
1878 | spin_unlock(&memcg_oom_lock); | 1881 | spin_unlock(&memcg_oom_lock); |
1879 | 1882 | ||
1880 | if (need_to_kill) { | 1883 | if (need_to_kill) { |
1881 | finish_wait(&memcg_oom_waitq, &owait.wait); | 1884 | finish_wait(&memcg_oom_waitq, &owait.wait); |
1882 | mem_cgroup_out_of_memory(mem, mask); | 1885 | mem_cgroup_out_of_memory(memcg, mask); |
1883 | } else { | 1886 | } else { |
1884 | schedule(); | 1887 | schedule(); |
1885 | finish_wait(&memcg_oom_waitq, &owait.wait); | 1888 | finish_wait(&memcg_oom_waitq, &owait.wait); |
1886 | } | 1889 | } |
1887 | spin_lock(&memcg_oom_lock); | 1890 | spin_lock(&memcg_oom_lock); |
1888 | if (locked) | 1891 | if (locked) |
1889 | mem_cgroup_oom_unlock(mem); | 1892 | mem_cgroup_oom_unlock(memcg); |
1890 | memcg_wakeup_oom(mem); | 1893 | memcg_wakeup_oom(memcg); |
1891 | spin_unlock(&memcg_oom_lock); | 1894 | spin_unlock(&memcg_oom_lock); |
1892 | 1895 | ||
1893 | mem_cgroup_unmark_under_oom(mem); | 1896 | mem_cgroup_unmark_under_oom(memcg); |
1894 | 1897 | ||
1895 | if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) | 1898 | if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) |
1896 | return false; | 1899 | return false; |
@@ -1926,7 +1929,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | |||
1926 | void mem_cgroup_update_page_stat(struct page *page, | 1929 | void mem_cgroup_update_page_stat(struct page *page, |
1927 | enum mem_cgroup_page_stat_item idx, int val) | 1930 | enum mem_cgroup_page_stat_item idx, int val) |
1928 | { | 1931 | { |
1929 | struct mem_cgroup *mem; | 1932 | struct mem_cgroup *memcg; |
1930 | struct page_cgroup *pc = lookup_page_cgroup(page); | 1933 | struct page_cgroup *pc = lookup_page_cgroup(page); |
1931 | bool need_unlock = false; | 1934 | bool need_unlock = false; |
1932 | unsigned long uninitialized_var(flags); | 1935 | unsigned long uninitialized_var(flags); |
@@ -1935,16 +1938,16 @@ void mem_cgroup_update_page_stat(struct page *page, | |||
1935 | return; | 1938 | return; |
1936 | 1939 | ||
1937 | rcu_read_lock(); | 1940 | rcu_read_lock(); |
1938 | mem = pc->mem_cgroup; | 1941 | memcg = pc->mem_cgroup; |
1939 | if (unlikely(!mem || !PageCgroupUsed(pc))) | 1942 | if (unlikely(!memcg || !PageCgroupUsed(pc))) |
1940 | goto out; | 1943 | goto out; |
1941 | /* pc->mem_cgroup is unstable ? */ | 1944 | /* pc->mem_cgroup is unstable ? */ |
1942 | if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) { | 1945 | if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) { |
1943 | /* take a lock to safely access pc->mem_cgroup */ | 1946 | /* take a lock to safely access pc->mem_cgroup */ |
1944 | move_lock_page_cgroup(pc, &flags); | 1947 | move_lock_page_cgroup(pc, &flags); |
1945 | need_unlock = true; | 1948 | need_unlock = true; |
1946 | mem = pc->mem_cgroup; | 1949 | memcg = pc->mem_cgroup; |
1947 | if (!mem || !PageCgroupUsed(pc)) | 1950 | if (!memcg || !PageCgroupUsed(pc)) |
1948 | goto out; | 1951 | goto out; |
1949 | } | 1952 | } |
1950 | 1953 | ||
@@ -1960,7 +1963,7 @@ void mem_cgroup_update_page_stat(struct page *page, | |||
1960 | BUG(); | 1963 | BUG(); |
1961 | } | 1964 | } |
1962 | 1965 | ||
1963 | this_cpu_add(mem->stat->count[idx], val); | 1966 | this_cpu_add(memcg->stat->count[idx], val); |
1964 | 1967 | ||
1965 | out: | 1968 | out: |
1966 | if (unlikely(need_unlock)) | 1969 | if (unlikely(need_unlock)) |
@@ -1991,13 +1994,13 @@ static DEFINE_MUTEX(percpu_charge_mutex); | |||
1991 | * cgroup which is not current target, returns false. This stock will be | 1994 | * cgroup which is not current target, returns false. This stock will be |
1992 | * refilled. | 1995 | * refilled. |
1993 | */ | 1996 | */ |
1994 | static bool consume_stock(struct mem_cgroup *mem) | 1997 | static bool consume_stock(struct mem_cgroup *memcg) |
1995 | { | 1998 | { |
1996 | struct memcg_stock_pcp *stock; | 1999 | struct memcg_stock_pcp *stock; |
1997 | bool ret = true; | 2000 | bool ret = true; |
1998 | 2001 | ||
1999 | stock = &get_cpu_var(memcg_stock); | 2002 | stock = &get_cpu_var(memcg_stock); |
2000 | if (mem == stock->cached && stock->nr_pages) | 2003 | if (memcg == stock->cached && stock->nr_pages) |
2001 | stock->nr_pages--; | 2004 | stock->nr_pages--; |
2002 | else /* need to call res_counter_charge */ | 2005 | else /* need to call res_counter_charge */ |
2003 | ret = false; | 2006 | ret = false; |
@@ -2038,24 +2041,24 @@ static void drain_local_stock(struct work_struct *dummy) | |||
2038 | * Cache charges(val) which is from res_counter, to local per_cpu area. | 2041 | * Cache charges(val) which is from res_counter, to local per_cpu area. |
2039 | * This will be consumed by consume_stock() function, later. | 2042 | * This will be consumed by consume_stock() function, later. |
2040 | */ | 2043 | */ |
2041 | static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages) | 2044 | static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) |
2042 | { | 2045 | { |
2043 | struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); | 2046 | struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); |
2044 | 2047 | ||
2045 | if (stock->cached != mem) { /* reset if necessary */ | 2048 | if (stock->cached != memcg) { /* reset if necessary */ |
2046 | drain_stock(stock); | 2049 | drain_stock(stock); |
2047 | stock->cached = mem; | 2050 | stock->cached = memcg; |
2048 | } | 2051 | } |
2049 | stock->nr_pages += nr_pages; | 2052 | stock->nr_pages += nr_pages; |
2050 | put_cpu_var(memcg_stock); | 2053 | put_cpu_var(memcg_stock); |
2051 | } | 2054 | } |
2052 | 2055 | ||
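consume_stock() and refill_stock() above form a per-CPU cache of pre-charged pages, so the common one-page charge never touches the shared res_counter. The sketch below models the idea in plain C with one stock slot per CPU; struct stock, cpu_stock[] and the explicit cpu index are inventions of the sketch (the kernel reaches its slot through get_cpu_var(memcg_stock) with preemption disabled).

    #include <stdbool.h>

    #define NR_CPUS 4

    struct mem_cgroup;                    /* opaque for this sketch */

    struct stock {
        struct mem_cgroup *cached;        /* memcg the cached pages belong to */
        unsigned int nr_pages;            /* pre-charged pages not yet handed out */
    };

    static struct stock cpu_stock[NR_CPUS];

    /* Hand out one cached page if this CPU's stock belongs to @memcg. */
    static bool consume_stock(int cpu, struct mem_cgroup *memcg)
    {
        struct stock *s = &cpu_stock[cpu];

        if (s->cached == memcg && s->nr_pages) {
            s->nr_pages--;
            return true;                  /* fast path: no res_counter traffic */
        }
        return false;                     /* caller must charge the res_counter */
    }

    /* Park leftover batch charges on this CPU for a later consume_stock(). */
    static void refill_stock(int cpu, struct mem_cgroup *memcg, unsigned int nr_pages)
    {
        struct stock *s = &cpu_stock[cpu];

        if (s->cached != memcg) {         /* stock held for another memcg */
            /* the real code drains it back to that memcg's res_counter here */
            s->nr_pages = 0;
            s->cached = memcg;
        }
        s->nr_pages += nr_pages;
    }

Charging a batch of pages at once and parking the surplus this way is what makes drain_all_stock() below necessary: cached charges have to be flushed back before a cgroup can be emptied or its usage trusted.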
2053 | /* | 2056 | /* |
2054 | * Drains all per-CPU charge caches for given root_mem resp. subtree | 2057 | * Drains all per-CPU charge caches for given root_memcg resp. subtree |
2055 | * of the hierarchy under it. sync flag says whether we should block | 2058 | * of the hierarchy under it. sync flag says whether we should block |
2056 | * until the work is done. | 2059 | * until the work is done. |
2057 | */ | 2060 | */ |
2058 | static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) | 2061 | static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) |
2059 | { | 2062 | { |
2060 | int cpu, curcpu; | 2063 | int cpu, curcpu; |
2061 | 2064 | ||
@@ -2064,12 +2067,12 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) | |||
2064 | curcpu = get_cpu(); | 2067 | curcpu = get_cpu(); |
2065 | for_each_online_cpu(cpu) { | 2068 | for_each_online_cpu(cpu) { |
2066 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 2069 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
2067 | struct mem_cgroup *mem; | 2070 | struct mem_cgroup *memcg; |
2068 | 2071 | ||
2069 | mem = stock->cached; | 2072 | memcg = stock->cached; |
2070 | if (!mem || !stock->nr_pages) | 2073 | if (!memcg || !stock->nr_pages) |
2071 | continue; | 2074 | continue; |
2072 | if (!mem_cgroup_same_or_subtree(root_mem, mem)) | 2075 | if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) |
2073 | continue; | 2076 | continue; |
2074 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { | 2077 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { |
2075 | if (cpu == curcpu) | 2078 | if (cpu == curcpu) |
@@ -2098,23 +2101,23 @@ out: | |||
2098 | * expects some charges will be back to res_counter later but cannot wait for | 2101 | * expects some charges will be back to res_counter later but cannot wait for |
2099 | * it. | 2102 | * it. |
2100 | */ | 2103 | */ |
2101 | static void drain_all_stock_async(struct mem_cgroup *root_mem) | 2104 | static void drain_all_stock_async(struct mem_cgroup *root_memcg) |
2102 | { | 2105 | { |
2103 | /* | 2106 | /* |
2104 | * If someone calls draining, avoid adding more kworker runs. | 2107 | * If someone calls draining, avoid adding more kworker runs. |
2105 | */ | 2108 | */ |
2106 | if (!mutex_trylock(&percpu_charge_mutex)) | 2109 | if (!mutex_trylock(&percpu_charge_mutex)) |
2107 | return; | 2110 | return; |
2108 | drain_all_stock(root_mem, false); | 2111 | drain_all_stock(root_memcg, false); |
2109 | mutex_unlock(&percpu_charge_mutex); | 2112 | mutex_unlock(&percpu_charge_mutex); |
2110 | } | 2113 | } |
2111 | 2114 | ||
2112 | /* This is a synchronous drain interface. */ | 2115 | /* This is a synchronous drain interface. */ |
2113 | static void drain_all_stock_sync(struct mem_cgroup *root_mem) | 2116 | static void drain_all_stock_sync(struct mem_cgroup *root_memcg) |
2114 | { | 2117 | { |
2115 | /* called when force_empty is called */ | 2118 | /* called when force_empty is called */ |
2116 | mutex_lock(&percpu_charge_mutex); | 2119 | mutex_lock(&percpu_charge_mutex); |
2117 | drain_all_stock(root_mem, true); | 2120 | drain_all_stock(root_memcg, true); |
2118 | mutex_unlock(&percpu_charge_mutex); | 2121 | mutex_unlock(&percpu_charge_mutex); |
2119 | } | 2122 | } |
2120 | 2123 | ||
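drain_all_stock_async() and drain_all_stock_sync() above differ only in how they take percpu_charge_mutex: trylock-and-bail for the opportunistic reclaim path, a blocking lock for force_empty. A small model of that calling convention; drain_all() here is a hypothetical stand-in for drain_all_stock():

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void drain_all(bool sync)
    {
        (void)sync;   /* would walk every CPU's stock and flush it */
    }

    /* Opportunistic: if a drain is already running, piling on more work is pointless. */
    static void drain_all_async(void)
    {
        if (pthread_mutex_trylock(&charge_mutex) != 0)
            return;
        drain_all(false);
        pthread_mutex_unlock(&charge_mutex);
    }

    /* force_empty path: cached charges must not be missed, so wait for the mutex. */
    static void drain_all_sync(void)
    {
        pthread_mutex_lock(&charge_mutex);
        drain_all(true);
        pthread_mutex_unlock(&charge_mutex);
    }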
@@ -2122,35 +2125,35 @@ static void drain_all_stock_sync(struct mem_cgroup *root_mem) | |||
2122 | * This function drains percpu counter value from DEAD cpu and | 2125 | * This function drains percpu counter value from DEAD cpu and |
2123 | * move it to local cpu. Note that this function can be preempted. | 2126 | * move it to local cpu. Note that this function can be preempted. |
2124 | */ | 2127 | */ |
2125 | static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) | 2128 | static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) |
2126 | { | 2129 | { |
2127 | int i; | 2130 | int i; |
2128 | 2131 | ||
2129 | spin_lock(&mem->pcp_counter_lock); | 2132 | spin_lock(&memcg->pcp_counter_lock); |
2130 | for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { | 2133 | for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { |
2131 | long x = per_cpu(mem->stat->count[i], cpu); | 2134 | long x = per_cpu(memcg->stat->count[i], cpu); |
2132 | 2135 | ||
2133 | per_cpu(mem->stat->count[i], cpu) = 0; | 2136 | per_cpu(memcg->stat->count[i], cpu) = 0; |
2134 | mem->nocpu_base.count[i] += x; | 2137 | memcg->nocpu_base.count[i] += x; |
2135 | } | 2138 | } |
2136 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { | 2139 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { |
2137 | unsigned long x = per_cpu(mem->stat->events[i], cpu); | 2140 | unsigned long x = per_cpu(memcg->stat->events[i], cpu); |
2138 | 2141 | ||
2139 | per_cpu(mem->stat->events[i], cpu) = 0; | 2142 | per_cpu(memcg->stat->events[i], cpu) = 0; |
2140 | mem->nocpu_base.events[i] += x; | 2143 | memcg->nocpu_base.events[i] += x; |
2141 | } | 2144 | } |
2142 | /* need to clear ON_MOVE value, works as a kind of lock. */ | 2145 | /* need to clear ON_MOVE value, works as a kind of lock. */ |
2143 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; | 2146 | per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; |
2144 | spin_unlock(&mem->pcp_counter_lock); | 2147 | spin_unlock(&memcg->pcp_counter_lock); |
2145 | } | 2148 | } |
2146 | 2149 | ||
2147 | static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) | 2150 | static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu) |
2148 | { | 2151 | { |
2149 | int idx = MEM_CGROUP_ON_MOVE; | 2152 | int idx = MEM_CGROUP_ON_MOVE; |
2150 | 2153 | ||
2151 | spin_lock(&mem->pcp_counter_lock); | 2154 | spin_lock(&memcg->pcp_counter_lock); |
2152 | per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; | 2155 | per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx]; |
2153 | spin_unlock(&mem->pcp_counter_lock); | 2156 | spin_unlock(&memcg->pcp_counter_lock); |
2154 | } | 2157 | } |
2155 | 2158 | ||
2156 | static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, | 2159 | static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, |
@@ -2188,7 +2191,7 @@ enum { | |||
2188 | CHARGE_OOM_DIE, /* the current is killed because of OOM */ | 2191 | CHARGE_OOM_DIE, /* the current is killed because of OOM */ |
2189 | }; | 2192 | }; |
2190 | 2193 | ||
2191 | static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | 2194 | static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, |
2192 | unsigned int nr_pages, bool oom_check) | 2195 | unsigned int nr_pages, bool oom_check) |
2193 | { | 2196 | { |
2194 | unsigned long csize = nr_pages * PAGE_SIZE; | 2197 | unsigned long csize = nr_pages * PAGE_SIZE; |
@@ -2197,16 +2200,16 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | |||
2197 | unsigned long flags = 0; | 2200 | unsigned long flags = 0; |
2198 | int ret; | 2201 | int ret; |
2199 | 2202 | ||
2200 | ret = res_counter_charge(&mem->res, csize, &fail_res); | 2203 | ret = res_counter_charge(&memcg->res, csize, &fail_res); |
2201 | 2204 | ||
2202 | if (likely(!ret)) { | 2205 | if (likely(!ret)) { |
2203 | if (!do_swap_account) | 2206 | if (!do_swap_account) |
2204 | return CHARGE_OK; | 2207 | return CHARGE_OK; |
2205 | ret = res_counter_charge(&mem->memsw, csize, &fail_res); | 2208 | ret = res_counter_charge(&memcg->memsw, csize, &fail_res); |
2206 | if (likely(!ret)) | 2209 | if (likely(!ret)) |
2207 | return CHARGE_OK; | 2210 | return CHARGE_OK; |
2208 | 2211 | ||
2209 | res_counter_uncharge(&mem->res, csize); | 2212 | res_counter_uncharge(&memcg->res, csize); |
2210 | mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); | 2213 | mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); |
2211 | flags |= MEM_CGROUP_RECLAIM_NOSWAP; | 2214 | flags |= MEM_CGROUP_RECLAIM_NOSWAP; |
2212 | } else | 2215 | } else |
@@ -2264,12 +2267,12 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | |||
2264 | static int __mem_cgroup_try_charge(struct mm_struct *mm, | 2267 | static int __mem_cgroup_try_charge(struct mm_struct *mm, |
2265 | gfp_t gfp_mask, | 2268 | gfp_t gfp_mask, |
2266 | unsigned int nr_pages, | 2269 | unsigned int nr_pages, |
2267 | struct mem_cgroup **memcg, | 2270 | struct mem_cgroup **ptr, |
2268 | bool oom) | 2271 | bool oom) |
2269 | { | 2272 | { |
2270 | unsigned int batch = max(CHARGE_BATCH, nr_pages); | 2273 | unsigned int batch = max(CHARGE_BATCH, nr_pages); |
2271 | int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; | 2274 | int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; |
2272 | struct mem_cgroup *mem = NULL; | 2275 | struct mem_cgroup *memcg = NULL; |
2273 | int ret; | 2276 | int ret; |
2274 | 2277 | ||
2275 | /* | 2278 | /* |
@@ -2287,17 +2290,17 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, | |||
2287 | * thread group leader migrates. It's possible that mm is not | 2290 | * thread group leader migrates. It's possible that mm is not |
2288 | * set, if so charge the init_mm (happens for pagecache usage). | 2291 | * set, if so charge the init_mm (happens for pagecache usage). |
2289 | */ | 2292 | */ |
2290 | if (!*memcg && !mm) | 2293 | if (!*ptr && !mm) |
2291 | goto bypass; | 2294 | goto bypass; |
2292 | again: | 2295 | again: |
2293 | if (*memcg) { /* css should be a valid one */ | 2296 | if (*ptr) { /* css should be a valid one */ |
2294 | mem = *memcg; | 2297 | memcg = *ptr; |
2295 | VM_BUG_ON(css_is_removed(&mem->css)); | 2298 | VM_BUG_ON(css_is_removed(&memcg->css)); |
2296 | if (mem_cgroup_is_root(mem)) | 2299 | if (mem_cgroup_is_root(memcg)) |
2297 | goto done; | 2300 | goto done; |
2298 | if (nr_pages == 1 && consume_stock(mem)) | 2301 | if (nr_pages == 1 && consume_stock(memcg)) |
2299 | goto done; | 2302 | goto done; |
2300 | css_get(&mem->css); | 2303 | css_get(&memcg->css); |
2301 | } else { | 2304 | } else { |
2302 | struct task_struct *p; | 2305 | struct task_struct *p; |
2303 | 2306 | ||
@@ -2305,7 +2308,7 @@ again: | |||
2305 | p = rcu_dereference(mm->owner); | 2308 | p = rcu_dereference(mm->owner); |
2306 | /* | 2309 | /* |
2307 | * Because we don't have task_lock(), "p" can exit. | 2310 | * Because we don't have task_lock(), "p" can exit. |
2308 | * In that case, "mem" can point to root or p can be NULL with | 2311 | * In that case, "memcg" can point to root or p can be NULL with |
2309 | * race with swapoff. Then, we have small risk of mis-accouning. | 2312 | * race with swapoff. Then, we have small risk of mis-accouning. |
2310 | * But such kind of mis-account by race always happens because | 2313 | * But such kind of mis-account by race always happens because |
2311 | * we don't have cgroup_mutex(). It's overkill and we allo that | 2314 | * we don't have cgroup_mutex(). It's overkill and we allo that |
@@ -2313,12 +2316,12 @@ again: | |||
2313 | * (*) swapoff et al. will charge against mm-struct not against | 2316 | * (*) swapoff et al. will charge against mm-struct not against |
2314 | * task-struct. So, mm->owner can be NULL. | 2317 | * task-struct. So, mm->owner can be NULL. |
2315 | */ | 2318 | */ |
2316 | mem = mem_cgroup_from_task(p); | 2319 | memcg = mem_cgroup_from_task(p); |
2317 | if (!mem || mem_cgroup_is_root(mem)) { | 2320 | if (!memcg || mem_cgroup_is_root(memcg)) { |
2318 | rcu_read_unlock(); | 2321 | rcu_read_unlock(); |
2319 | goto done; | 2322 | goto done; |
2320 | } | 2323 | } |
2321 | if (nr_pages == 1 && consume_stock(mem)) { | 2324 | if (nr_pages == 1 && consume_stock(memcg)) { |
2322 | /* | 2325 | /* |
2323 | * It seems dangerous to access memcg without css_get(). | 2326 | * It seems dangerous to access memcg without css_get(). |
2324 | * But considering how consume_stock works, it's not | 2327 | * But considering how consume_stock works, it's not |
@@ -2331,7 +2334,7 @@ again: | |||
2331 | goto done; | 2334 | goto done; |
2332 | } | 2335 | } |
2333 | /* after here, we may be blocked. we need to get refcnt */ | 2336 | /* after here, we may be blocked. we need to get refcnt */ |
2334 | if (!css_tryget(&mem->css)) { | 2337 | if (!css_tryget(&memcg->css)) { |
2335 | rcu_read_unlock(); | 2338 | rcu_read_unlock(); |
2336 | goto again; | 2339 | goto again; |
2337 | } | 2340 | } |
@@ -2343,7 +2346,7 @@ again: | |||
2343 | 2346 | ||
2344 | /* If killed, bypass charge */ | 2347 | /* If killed, bypass charge */ |
2345 | if (fatal_signal_pending(current)) { | 2348 | if (fatal_signal_pending(current)) { |
2346 | css_put(&mem->css); | 2349 | css_put(&memcg->css); |
2347 | goto bypass; | 2350 | goto bypass; |
2348 | } | 2351 | } |
2349 | 2352 | ||
@@ -2353,43 +2356,43 @@ again: | |||
2353 | nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; | 2356 | nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; |
2354 | } | 2357 | } |
2355 | 2358 | ||
2356 | ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check); | 2359 | ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check); |
2357 | switch (ret) { | 2360 | switch (ret) { |
2358 | case CHARGE_OK: | 2361 | case CHARGE_OK: |
2359 | break; | 2362 | break; |
2360 | case CHARGE_RETRY: /* not in OOM situation but retry */ | 2363 | case CHARGE_RETRY: /* not in OOM situation but retry */ |
2361 | batch = nr_pages; | 2364 | batch = nr_pages; |
2362 | css_put(&mem->css); | 2365 | css_put(&memcg->css); |
2363 | mem = NULL; | 2366 | memcg = NULL; |
2364 | goto again; | 2367 | goto again; |
2365 | case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ | 2368 | case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ |
2366 | css_put(&mem->css); | 2369 | css_put(&memcg->css); |
2367 | goto nomem; | 2370 | goto nomem; |
2368 | case CHARGE_NOMEM: /* OOM routine works */ | 2371 | case CHARGE_NOMEM: /* OOM routine works */ |
2369 | if (!oom) { | 2372 | if (!oom) { |
2370 | css_put(&mem->css); | 2373 | css_put(&memcg->css); |
2371 | goto nomem; | 2374 | goto nomem; |
2372 | } | 2375 | } |
2373 | /* If oom, we never return -ENOMEM */ | 2376 | /* If oom, we never return -ENOMEM */ |
2374 | nr_oom_retries--; | 2377 | nr_oom_retries--; |
2375 | break; | 2378 | break; |
2376 | case CHARGE_OOM_DIE: /* Killed by OOM Killer */ | 2379 | case CHARGE_OOM_DIE: /* Killed by OOM Killer */ |
2377 | css_put(&mem->css); | 2380 | css_put(&memcg->css); |
2378 | goto bypass; | 2381 | goto bypass; |
2379 | } | 2382 | } |
2380 | } while (ret != CHARGE_OK); | 2383 | } while (ret != CHARGE_OK); |
2381 | 2384 | ||
2382 | if (batch > nr_pages) | 2385 | if (batch > nr_pages) |
2383 | refill_stock(mem, batch - nr_pages); | 2386 | refill_stock(memcg, batch - nr_pages); |
2384 | css_put(&mem->css); | 2387 | css_put(&memcg->css); |
2385 | done: | 2388 | done: |
2386 | *memcg = mem; | 2389 | *ptr = memcg; |
2387 | return 0; | 2390 | return 0; |
2388 | nomem: | 2391 | nomem: |
2389 | *memcg = NULL; | 2392 | *ptr = NULL; |
2390 | return -ENOMEM; | 2393 | return -ENOMEM; |
2391 | bypass: | 2394 | bypass: |
2392 | *memcg = NULL; | 2395 | *ptr = NULL; |
2393 | return 0; | 2396 | return 0; |
2394 | } | 2397 | } |
2395 | 2398 | ||
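The done/nomem/bypass exits above give __mem_cgroup_try_charge() callers a three-way contract: 0 with *ptr set means commit the charge, 0 with *ptr == NULL means silently bypass accounting, and -ENOMEM means fail. One detail worth isolating is the two-counter step in mem_cgroup_do_charge(): memory is charged first and rolled back if the memory+swap counter refuses, so the pair can never drift apart. A standalone sketch of that rollback, using a hypothetical struct counter in place of res_counter:

    #include <stdbool.h>

    struct counter {
        unsigned long usage;
        unsigned long limit;
    };

    /* Charge @nr pages against @c; fail with no side effects if over the limit. */
    static bool counter_charge(struct counter *c, unsigned long nr)
    {
        if (c->usage + nr > c->limit)
            return false;
        c->usage += nr;
        return true;
    }

    static void counter_uncharge(struct counter *c, unsigned long nr)
    {
        c->usage -= nr;
    }

    /* Mirror of the res/memsw ordering: never leave a half-applied charge. */
    static bool charge_both(struct counter *res, struct counter *memsw,
                            unsigned long nr, bool do_swap_account)
    {
        if (!counter_charge(res, nr))
            return false;                  /* over memory limit: reclaim may help */
        if (do_swap_account && !counter_charge(memsw, nr)) {
            counter_uncharge(res, nr);     /* roll back the first counter */
            return false;                  /* over mem+swap: reclaim without swapping */
        }
        return true;
    }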
@@ -2398,15 +2401,15 @@ bypass: | |||
2398 | * This function is for that and do uncharge, put css's refcnt. | 2401 | * This function is for that and do uncharge, put css's refcnt. |
2399 | * gotten by try_charge(). | 2402 | * gotten by try_charge(). |
2400 | */ | 2403 | */ |
2401 | static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, | 2404 | static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg, |
2402 | unsigned int nr_pages) | 2405 | unsigned int nr_pages) |
2403 | { | 2406 | { |
2404 | if (!mem_cgroup_is_root(mem)) { | 2407 | if (!mem_cgroup_is_root(memcg)) { |
2405 | unsigned long bytes = nr_pages * PAGE_SIZE; | 2408 | unsigned long bytes = nr_pages * PAGE_SIZE; |
2406 | 2409 | ||
2407 | res_counter_uncharge(&mem->res, bytes); | 2410 | res_counter_uncharge(&memcg->res, bytes); |
2408 | if (do_swap_account) | 2411 | if (do_swap_account) |
2409 | res_counter_uncharge(&mem->memsw, bytes); | 2412 | res_counter_uncharge(&memcg->memsw, bytes); |
2410 | } | 2413 | } |
2411 | } | 2414 | } |
2412 | 2415 | ||
@@ -2431,7 +2434,7 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) | |||
2431 | 2434 | ||
2432 | struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | 2435 | struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) |
2433 | { | 2436 | { |
2434 | struct mem_cgroup *mem = NULL; | 2437 | struct mem_cgroup *memcg = NULL; |
2435 | struct page_cgroup *pc; | 2438 | struct page_cgroup *pc; |
2436 | unsigned short id; | 2439 | unsigned short id; |
2437 | swp_entry_t ent; | 2440 | swp_entry_t ent; |
@@ -2441,23 +2444,23 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | |||
2441 | pc = lookup_page_cgroup(page); | 2444 | pc = lookup_page_cgroup(page); |
2442 | lock_page_cgroup(pc); | 2445 | lock_page_cgroup(pc); |
2443 | if (PageCgroupUsed(pc)) { | 2446 | if (PageCgroupUsed(pc)) { |
2444 | mem = pc->mem_cgroup; | 2447 | memcg = pc->mem_cgroup; |
2445 | if (mem && !css_tryget(&mem->css)) | 2448 | if (memcg && !css_tryget(&memcg->css)) |
2446 | mem = NULL; | 2449 | memcg = NULL; |
2447 | } else if (PageSwapCache(page)) { | 2450 | } else if (PageSwapCache(page)) { |
2448 | ent.val = page_private(page); | 2451 | ent.val = page_private(page); |
2449 | id = lookup_swap_cgroup(ent); | 2452 | id = lookup_swap_cgroup(ent); |
2450 | rcu_read_lock(); | 2453 | rcu_read_lock(); |
2451 | mem = mem_cgroup_lookup(id); | 2454 | memcg = mem_cgroup_lookup(id); |
2452 | if (mem && !css_tryget(&mem->css)) | 2455 | if (memcg && !css_tryget(&memcg->css)) |
2453 | mem = NULL; | 2456 | memcg = NULL; |
2454 | rcu_read_unlock(); | 2457 | rcu_read_unlock(); |
2455 | } | 2458 | } |
2456 | unlock_page_cgroup(pc); | 2459 | unlock_page_cgroup(pc); |
2457 | return mem; | 2460 | return memcg; |
2458 | } | 2461 | } |
2459 | 2462 | ||
2460 | static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, | 2463 | static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, |
2461 | struct page *page, | 2464 | struct page *page, |
2462 | unsigned int nr_pages, | 2465 | unsigned int nr_pages, |
2463 | struct page_cgroup *pc, | 2466 | struct page_cgroup *pc, |
@@ -2466,14 +2469,14 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, | |||
2466 | lock_page_cgroup(pc); | 2469 | lock_page_cgroup(pc); |
2467 | if (unlikely(PageCgroupUsed(pc))) { | 2470 | if (unlikely(PageCgroupUsed(pc))) { |
2468 | unlock_page_cgroup(pc); | 2471 | unlock_page_cgroup(pc); |
2469 | __mem_cgroup_cancel_charge(mem, nr_pages); | 2472 | __mem_cgroup_cancel_charge(memcg, nr_pages); |
2470 | return; | 2473 | return; |
2471 | } | 2474 | } |
2472 | /* | 2475 | /* |
2473 | * we don't need page_cgroup_lock about tail pages, because they are not | 2476 | * we don't need page_cgroup_lock about tail pages, because they are not |
2474 | * accessed by any other context at this point. | 2477 | * accessed by any other context at this point. |
2475 | */ | 2478 | */ |
2476 | pc->mem_cgroup = mem; | 2479 | pc->mem_cgroup = memcg; |
2477 | /* | 2480 | /* |
2478 | * We access a page_cgroup asynchronously without lock_page_cgroup(). | 2481 | * We access a page_cgroup asynchronously without lock_page_cgroup(). |
2479 | * Especially when a page_cgroup is taken from a page, pc->mem_cgroup | 2482 | * Especially when a page_cgroup is taken from a page, pc->mem_cgroup |
@@ -2496,14 +2499,14 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, | |||
2496 | break; | 2499 | break; |
2497 | } | 2500 | } |
2498 | 2501 | ||
2499 | mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); | 2502 | mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages); |
2500 | unlock_page_cgroup(pc); | 2503 | unlock_page_cgroup(pc); |
2501 | /* | 2504 | /* |
2502 | * "charge_statistics" updated event counter. Then, check it. | 2505 | * "charge_statistics" updated event counter. Then, check it. |
2503 | * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. | 2506 | * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. |
2504 | * if they exceed the softlimit. | 2507 | * if they exceed the softlimit. |
2505 | */ | 2508 | */ |
2506 | memcg_check_events(mem, page); | 2509 | memcg_check_events(memcg, page); |
2507 | } | 2510 | } |
2508 | 2511 | ||
2509 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 2512 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -2690,7 +2693,7 @@ out: | |||
2690 | static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | 2693 | static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, |
2691 | gfp_t gfp_mask, enum charge_type ctype) | 2694 | gfp_t gfp_mask, enum charge_type ctype) |
2692 | { | 2695 | { |
2693 | struct mem_cgroup *mem = NULL; | 2696 | struct mem_cgroup *memcg = NULL; |
2694 | unsigned int nr_pages = 1; | 2697 | unsigned int nr_pages = 1; |
2695 | struct page_cgroup *pc; | 2698 | struct page_cgroup *pc; |
2696 | bool oom = true; | 2699 | bool oom = true; |
@@ -2709,11 +2712,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
2709 | pc = lookup_page_cgroup(page); | 2712 | pc = lookup_page_cgroup(page); |
2710 | BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */ | 2713 | BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */ |
2711 | 2714 | ||
2712 | ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom); | 2715 | ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); |
2713 | if (ret || !mem) | 2716 | if (ret || !memcg) |
2714 | return ret; | 2717 | return ret; |
2715 | 2718 | ||
2716 | __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype); | 2719 | __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype); |
2717 | return 0; | 2720 | return 0; |
2718 | } | 2721 | } |
2719 | 2722 | ||
@@ -2742,7 +2745,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, | |||
2742 | enum charge_type ctype); | 2745 | enum charge_type ctype); |
2743 | 2746 | ||
2744 | static void | 2747 | static void |
2745 | __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, | 2748 | __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg, |
2746 | enum charge_type ctype) | 2749 | enum charge_type ctype) |
2747 | { | 2750 | { |
2748 | struct page_cgroup *pc = lookup_page_cgroup(page); | 2751 | struct page_cgroup *pc = lookup_page_cgroup(page); |
@@ -2752,7 +2755,7 @@ __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, | |||
2752 | * LRU. Take care of it. | 2755 | * LRU. Take care of it. |
2753 | */ | 2756 | */ |
2754 | mem_cgroup_lru_del_before_commit(page); | 2757 | mem_cgroup_lru_del_before_commit(page); |
2755 | __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); | 2758 | __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); |
2756 | mem_cgroup_lru_add_after_commit(page); | 2759 | mem_cgroup_lru_add_after_commit(page); |
2757 | return; | 2760 | return; |
2758 | } | 2761 | } |
@@ -2760,7 +2763,7 @@ __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem, | |||
2760 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | 2763 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, |
2761 | gfp_t gfp_mask) | 2764 | gfp_t gfp_mask) |
2762 | { | 2765 | { |
2763 | struct mem_cgroup *mem = NULL; | 2766 | struct mem_cgroup *memcg = NULL; |
2764 | int ret; | 2767 | int ret; |
2765 | 2768 | ||
2766 | if (mem_cgroup_disabled()) | 2769 | if (mem_cgroup_disabled()) |
@@ -2772,8 +2775,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | |||
2772 | mm = &init_mm; | 2775 | mm = &init_mm; |
2773 | 2776 | ||
2774 | if (page_is_file_cache(page)) { | 2777 | if (page_is_file_cache(page)) { |
2775 | ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true); | 2778 | ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true); |
2776 | if (ret || !mem) | 2779 | if (ret || !memcg) |
2777 | return ret; | 2780 | return ret; |
2778 | 2781 | ||
2779 | /* | 2782 | /* |
@@ -2781,15 +2784,15 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | |||
2781 | * put that would remove them from the LRU list, make | 2784 | * put that would remove them from the LRU list, make |
2782 | * sure that they get relinked properly. | 2785 | * sure that they get relinked properly. |
2783 | */ | 2786 | */ |
2784 | __mem_cgroup_commit_charge_lrucare(page, mem, | 2787 | __mem_cgroup_commit_charge_lrucare(page, memcg, |
2785 | MEM_CGROUP_CHARGE_TYPE_CACHE); | 2788 | MEM_CGROUP_CHARGE_TYPE_CACHE); |
2786 | return ret; | 2789 | return ret; |
2787 | } | 2790 | } |
2788 | /* shmem */ | 2791 | /* shmem */ |
2789 | if (PageSwapCache(page)) { | 2792 | if (PageSwapCache(page)) { |
2790 | ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); | 2793 | ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg); |
2791 | if (!ret) | 2794 | if (!ret) |
2792 | __mem_cgroup_commit_charge_swapin(page, mem, | 2795 | __mem_cgroup_commit_charge_swapin(page, memcg, |
2793 | MEM_CGROUP_CHARGE_TYPE_SHMEM); | 2796 | MEM_CGROUP_CHARGE_TYPE_SHMEM); |
2794 | } else | 2797 | } else |
2795 | ret = mem_cgroup_charge_common(page, mm, gfp_mask, | 2798 | ret = mem_cgroup_charge_common(page, mm, gfp_mask, |
@@ -2808,7 +2811,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | |||
2808 | struct page *page, | 2811 | struct page *page, |
2809 | gfp_t mask, struct mem_cgroup **ptr) | 2812 | gfp_t mask, struct mem_cgroup **ptr) |
2810 | { | 2813 | { |
2811 | struct mem_cgroup *mem; | 2814 | struct mem_cgroup *memcg; |
2812 | int ret; | 2815 | int ret; |
2813 | 2816 | ||
2814 | *ptr = NULL; | 2817 | *ptr = NULL; |
@@ -2826,12 +2829,12 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | |||
2826 | */ | 2829 | */ |
2827 | if (!PageSwapCache(page)) | 2830 | if (!PageSwapCache(page)) |
2828 | goto charge_cur_mm; | 2831 | goto charge_cur_mm; |
2829 | mem = try_get_mem_cgroup_from_page(page); | 2832 | memcg = try_get_mem_cgroup_from_page(page); |
2830 | if (!mem) | 2833 | if (!memcg) |
2831 | goto charge_cur_mm; | 2834 | goto charge_cur_mm; |
2832 | *ptr = mem; | 2835 | *ptr = memcg; |
2833 | ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true); | 2836 | ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true); |
2834 | css_put(&mem->css); | 2837 | css_put(&memcg->css); |
2835 | return ret; | 2838 | return ret; |
2836 | charge_cur_mm: | 2839 | charge_cur_mm: |
2837 | if (unlikely(!mm)) | 2840 | if (unlikely(!mm)) |
@@ -2891,16 +2894,16 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) | |||
2891 | MEM_CGROUP_CHARGE_TYPE_MAPPED); | 2894 | MEM_CGROUP_CHARGE_TYPE_MAPPED); |
2892 | } | 2895 | } |
2893 | 2896 | ||
2894 | void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) | 2897 | void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) |
2895 | { | 2898 | { |
2896 | if (mem_cgroup_disabled()) | 2899 | if (mem_cgroup_disabled()) |
2897 | return; | 2900 | return; |
2898 | if (!mem) | 2901 | if (!memcg) |
2899 | return; | 2902 | return; |
2900 | __mem_cgroup_cancel_charge(mem, 1); | 2903 | __mem_cgroup_cancel_charge(memcg, 1); |
2901 | } | 2904 | } |
2902 | 2905 | ||
2903 | static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, | 2906 | static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg, |
2904 | unsigned int nr_pages, | 2907 | unsigned int nr_pages, |
2905 | const enum charge_type ctype) | 2908 | const enum charge_type ctype) |
2906 | { | 2909 | { |
@@ -2918,7 +2921,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, | |||
2918 | * uncharges. Then, it's ok to ignore memcg's refcnt. | 2921 | * uncharges. Then, it's ok to ignore memcg's refcnt. |
2919 | */ | 2922 | */ |
2920 | if (!batch->memcg) | 2923 | if (!batch->memcg) |
2921 | batch->memcg = mem; | 2924 | batch->memcg = memcg; |
2922 | /* | 2925 | /* |
2923 | * do_batch > 0 when unmapping pages or inode invalidate/truncate. | 2926 | * do_batch > 0 when unmapping pages or inode invalidate/truncate. |
2924 | * In those cases, all pages freed continuously can be expected to be in | 2927 | * In those cases, all pages freed continuously can be expected to be in |
@@ -2938,7 +2941,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, | |||
2938 | * merge a series of uncharges to an uncharge of res_counter. | 2941 | * merge a series of uncharges to an uncharge of res_counter. |
2939 | * If not, we uncharge res_counter one by one. | 2942 | * If not, we uncharge res_counter one by one. |
2940 | */ | 2943 | */ |
2941 | if (batch->memcg != mem) | 2944 | if (batch->memcg != memcg) |
2942 | goto direct_uncharge; | 2945 | goto direct_uncharge; |
2943 | /* remember freed charge and uncharge it later */ | 2946 | /* remember freed charge and uncharge it later */ |
2944 | batch->nr_pages++; | 2947 | batch->nr_pages++; |
@@ -2946,11 +2949,11 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem, | |||
2946 | batch->memsw_nr_pages++; | 2949 | batch->memsw_nr_pages++; |
2947 | return; | 2950 | return; |
2948 | direct_uncharge: | 2951 | direct_uncharge: |
2949 | res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE); | 2952 | res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); |
2950 | if (uncharge_memsw) | 2953 | if (uncharge_memsw) |
2951 | res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE); | 2954 | res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); |
2952 | if (unlikely(batch->memcg != mem)) | 2955 | if (unlikely(batch->memcg != memcg)) |
2953 | memcg_oom_recover(mem); | 2956 | memcg_oom_recover(memcg); |
2954 | return; | 2957 | return; |
2955 | } | 2958 | } |
2956 | 2959 | ||
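The batching branch above exists so that a long truncate or munmap pays for one res_counter update instead of one per page: charges accumulate in a per-task batch while batching is active and are flushed in one go afterwards. A simplified model of the accumulate-or-flush decision; struct uncharge_batch and flush_batch() are names invented for the sketch:

    #include <stdbool.h>
    #include <stddef.h>

    struct mem_cgroup;                         /* opaque here */

    struct uncharge_batch {
        bool do_batch;                         /* set around truncate/unmap loops */
        struct mem_cgroup *memcg;              /* cgroup the batch belongs to */
        unsigned long nr_pages;                /* pages waiting to be uncharged */
    };

    static void counter_uncharge(struct mem_cgroup *memcg, unsigned long nr_pages)
    {
        (void)memcg; (void)nr_pages;           /* stands in for res_counter_uncharge() */
    }

    static void do_uncharge(struct uncharge_batch *batch,
                            struct mem_cgroup *memcg, unsigned long nr_pages)
    {
        if (batch->do_batch) {
            if (!batch->memcg)
                batch->memcg = memcg;          /* first page decides the batch owner */
            if (batch->memcg == memcg) {
                batch->nr_pages += nr_pages;   /* defer: flushed once at the end */
                return;
            }
        }
        counter_uncharge(memcg, nr_pages);     /* different memcg or no batch: pay now */
    }

    static void flush_batch(struct uncharge_batch *batch)
    {
        if (batch->memcg && batch->nr_pages)
            counter_uncharge(batch->memcg, batch->nr_pages);
        batch->memcg = NULL;
        batch->nr_pages = 0;
    }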
@@ -2960,7 +2963,7 @@ direct_uncharge: | |||
2960 | static struct mem_cgroup * | 2963 | static struct mem_cgroup * |
2961 | __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) | 2964 | __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) |
2962 | { | 2965 | { |
2963 | struct mem_cgroup *mem = NULL; | 2966 | struct mem_cgroup *memcg = NULL; |
2964 | unsigned int nr_pages = 1; | 2967 | unsigned int nr_pages = 1; |
2965 | struct page_cgroup *pc; | 2968 | struct page_cgroup *pc; |
2966 | 2969 | ||
@@ -2983,7 +2986,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) | |||
2983 | 2986 | ||
2984 | lock_page_cgroup(pc); | 2987 | lock_page_cgroup(pc); |
2985 | 2988 | ||
2986 | mem = pc->mem_cgroup; | 2989 | memcg = pc->mem_cgroup; |
2987 | 2990 | ||
2988 | if (!PageCgroupUsed(pc)) | 2991 | if (!PageCgroupUsed(pc)) |
2989 | goto unlock_out; | 2992 | goto unlock_out; |
@@ -3006,7 +3009,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) | |||
3006 | break; | 3009 | break; |
3007 | } | 3010 | } |
3008 | 3011 | ||
3009 | mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages); | 3012 | mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages); |
3010 | 3013 | ||
3011 | ClearPageCgroupUsed(pc); | 3014 | ClearPageCgroupUsed(pc); |
3012 | /* | 3015 | /* |
@@ -3018,18 +3021,18 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) | |||
3018 | 3021 | ||
3019 | unlock_page_cgroup(pc); | 3022 | unlock_page_cgroup(pc); |
3020 | /* | 3023 | /* |
3021 | * even after unlock, we have mem->res.usage here and this memcg | 3024 | * even after unlock, we have memcg->res.usage here and this memcg |
3022 | * will never be freed. | 3025 | * will never be freed. |
3023 | */ | 3026 | */ |
3024 | memcg_check_events(mem, page); | 3027 | memcg_check_events(memcg, page); |
3025 | if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { | 3028 | if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { |
3026 | mem_cgroup_swap_statistics(mem, true); | 3029 | mem_cgroup_swap_statistics(memcg, true); |
3027 | mem_cgroup_get(mem); | 3030 | mem_cgroup_get(memcg); |
3028 | } | 3031 | } |
3029 | if (!mem_cgroup_is_root(mem)) | 3032 | if (!mem_cgroup_is_root(memcg)) |
3030 | mem_cgroup_do_uncharge(mem, nr_pages, ctype); | 3033 | mem_cgroup_do_uncharge(memcg, nr_pages, ctype); |
3031 | 3034 | ||
3032 | return mem; | 3035 | return memcg; |
3033 | 3036 | ||
3034 | unlock_out: | 3037 | unlock_out: |
3035 | unlock_page_cgroup(pc); | 3038 | unlock_page_cgroup(pc); |
@@ -3219,7 +3222,7 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, | |||
3219 | int mem_cgroup_prepare_migration(struct page *page, | 3222 | int mem_cgroup_prepare_migration(struct page *page, |
3220 | struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) | 3223 | struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) |
3221 | { | 3224 | { |
3222 | struct mem_cgroup *mem = NULL; | 3225 | struct mem_cgroup *memcg = NULL; |
3223 | struct page_cgroup *pc; | 3226 | struct page_cgroup *pc; |
3224 | enum charge_type ctype; | 3227 | enum charge_type ctype; |
3225 | int ret = 0; | 3228 | int ret = 0; |
@@ -3233,8 +3236,8 @@ int mem_cgroup_prepare_migration(struct page *page, | |||
3233 | pc = lookup_page_cgroup(page); | 3236 | pc = lookup_page_cgroup(page); |
3234 | lock_page_cgroup(pc); | 3237 | lock_page_cgroup(pc); |
3235 | if (PageCgroupUsed(pc)) { | 3238 | if (PageCgroupUsed(pc)) { |
3236 | mem = pc->mem_cgroup; | 3239 | memcg = pc->mem_cgroup; |
3237 | css_get(&mem->css); | 3240 | css_get(&memcg->css); |
3238 | /* | 3241 | /* |
3239 | * At migrating an anonymous page, its mapcount goes down | 3242 | * At migrating an anonymous page, its mapcount goes down |
3240 | * to 0 and uncharge() will be called. But, even if it's fully | 3243 | * to 0 and uncharge() will be called. But, even if it's fully |
@@ -3272,12 +3275,12 @@ int mem_cgroup_prepare_migration(struct page *page, | |||
3272 | * If the page is not charged at this point, | 3275 | * If the page is not charged at this point, |
3273 | * we return here. | 3276 | * we return here. |
3274 | */ | 3277 | */ |
3275 | if (!mem) | 3278 | if (!memcg) |
3276 | return 0; | 3279 | return 0; |
3277 | 3280 | ||
3278 | *ptr = mem; | 3281 | *ptr = memcg; |
3279 | ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false); | 3282 | ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false); |
3280 | css_put(&mem->css);/* drop extra refcnt */ | 3283 | css_put(&memcg->css);/* drop extra refcnt */ |
3281 | if (ret || *ptr == NULL) { | 3284 | if (ret || *ptr == NULL) { |
3282 | if (PageAnon(page)) { | 3285 | if (PageAnon(page)) { |
3283 | lock_page_cgroup(pc); | 3286 | lock_page_cgroup(pc); |
@@ -3303,21 +3306,21 @@ int mem_cgroup_prepare_migration(struct page *page, | |||
3303 | ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; | 3306 | ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; |
3304 | else | 3307 | else |
3305 | ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; | 3308 | ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; |
3306 | __mem_cgroup_commit_charge(mem, page, 1, pc, ctype); | 3309 | __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); |
3307 | return ret; | 3310 | return ret; |
3308 | } | 3311 | } |
3309 | 3312 | ||
3310 | /* remove redundant charge if migration failed*/ | 3313 | /* remove redundant charge if migration failed*/ |
3311 | void mem_cgroup_end_migration(struct mem_cgroup *mem, | 3314 | void mem_cgroup_end_migration(struct mem_cgroup *memcg, |
3312 | struct page *oldpage, struct page *newpage, bool migration_ok) | 3315 | struct page *oldpage, struct page *newpage, bool migration_ok) |
3313 | { | 3316 | { |
3314 | struct page *used, *unused; | 3317 | struct page *used, *unused; |
3315 | struct page_cgroup *pc; | 3318 | struct page_cgroup *pc; |
3316 | 3319 | ||
3317 | if (!mem) | 3320 | if (!memcg) |
3318 | return; | 3321 | return; |
3319 | /* blocks rmdir() */ | 3322 | /* blocks rmdir() */ |
3320 | cgroup_exclude_rmdir(&mem->css); | 3323 | cgroup_exclude_rmdir(&memcg->css); |
3321 | if (!migration_ok) { | 3324 | if (!migration_ok) { |
3322 | used = oldpage; | 3325 | used = oldpage; |
3323 | unused = newpage; | 3326 | unused = newpage; |
@@ -3353,7 +3356,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
3353 | * So, rmdir()->pre_destroy() can be called while we do this charge. | 3356 | * So, rmdir()->pre_destroy() can be called while we do this charge. |
3354 | * In that case, we need to call pre_destroy() again. check it here. | 3357 | * In that case, we need to call pre_destroy() again. check it here. |
3355 | */ | 3358 | */ |
3356 | cgroup_release_and_wakeup_rmdir(&mem->css); | 3359 | cgroup_release_and_wakeup_rmdir(&memcg->css); |
3357 | } | 3360 | } |
3358 | 3361 | ||
3359 | #ifdef CONFIG_DEBUG_VM | 3362 | #ifdef CONFIG_DEBUG_VM |
@@ -3432,7 +3435,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, | |||
3432 | /* | 3435 | /* |
3433 | * Rather than hide all in some function, I do this in | 3436 | * Rather than hide all in some function, I do this in |
3434 | * open coded manner. You see what this really does. | 3437 | * open coded manner. You see what this really does. |
3435 | * We have to guarantee mem->res.limit < mem->memsw.limit. | 3438 | * We have to guarantee memcg->res.limit < memcg->memsw.limit. |
3436 | */ | 3439 | */ |
3437 | mutex_lock(&set_limit_mutex); | 3440 | mutex_lock(&set_limit_mutex); |
3438 | memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 3441 | memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); |
@@ -3494,7 +3497,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, | |||
3494 | /* | 3497 | /* |
3495 | * Rather than hide all in some function, I do this in | 3498 | * Rather than hide all in some function, I do this in |
3496 | * open coded manner. You see what this really does. | 3499 | * open coded manner. You see what this really does. |
3497 | * We have to guarantee mem->res.limit < mem->memsw.limit. | 3500 | * We have to guarantee memcg->res.limit < memcg->memsw.limit. |
3498 | */ | 3501 | */ |
3499 | mutex_lock(&set_limit_mutex); | 3502 | mutex_lock(&set_limit_mutex); |
3500 | memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 3503 | memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); |
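Both resize hunks above repeat the invariant spelled out in their comments: the plain memory limit must never exceed the memory+swap limit, so each writer re-reads the other limit under set_limit_mutex before applying its own value. A tiny sketch of that validation (the struct and field names are invented here; limits are in bytes):

    #include <stdbool.h>

    struct limits {
        unsigned long long mem_limit;     /* cf. memory.limit_in_bytes */
        unsigned long long memsw_limit;   /* cf. memory.memsw.limit_in_bytes */
    };

    /* Keep mem_limit <= memsw_limit no matter which knob is written. */
    static bool set_mem_limit(struct limits *l, unsigned long long val)
    {
        if (val > l->memsw_limit)
            return false;                 /* would promise memory that memsw forbids */
        l->mem_limit = val;
        return true;
    }

    static bool set_memsw_limit(struct limits *l, unsigned long long val)
    {
        if (val < l->mem_limit)
            return false;                 /* memsw must cover the memory limit */
        l->memsw_limit = val;
        return true;
    }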
@@ -3632,7 +3635,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
3632 | * This routine traverse page_cgroup in given list and drop them all. | 3635 | * This routine traverse page_cgroup in given list and drop them all. |
3633 | * *And* this routine doesn't reclaim page itself, just removes page_cgroup. | 3636 | * *And* this routine doesn't reclaim page itself, just removes page_cgroup. |
3634 | */ | 3637 | */ |
3635 | static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, | 3638 | static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg, |
3636 | int node, int zid, enum lru_list lru) | 3639 | int node, int zid, enum lru_list lru) |
3637 | { | 3640 | { |
3638 | struct zone *zone; | 3641 | struct zone *zone; |
@@ -3643,7 +3646,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, | |||
3643 | int ret = 0; | 3646 | int ret = 0; |
3644 | 3647 | ||
3645 | zone = &NODE_DATA(node)->node_zones[zid]; | 3648 | zone = &NODE_DATA(node)->node_zones[zid]; |
3646 | mz = mem_cgroup_zoneinfo(mem, node, zid); | 3649 | mz = mem_cgroup_zoneinfo(memcg, node, zid); |
3647 | list = &mz->lists[lru]; | 3650 | list = &mz->lists[lru]; |
3648 | 3651 | ||
3649 | loop = MEM_CGROUP_ZSTAT(mz, lru); | 3652 | loop = MEM_CGROUP_ZSTAT(mz, lru); |
@@ -3670,7 +3673,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, | |||
3670 | 3673 | ||
3671 | page = lookup_cgroup_page(pc); | 3674 | page = lookup_cgroup_page(pc); |
3672 | 3675 | ||
3673 | ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL); | 3676 | ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL); |
3674 | if (ret == -ENOMEM) | 3677 | if (ret == -ENOMEM) |
3675 | break; | 3678 | break; |
3676 | 3679 | ||
@@ -3691,14 +3694,14 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, | |||
3691 | * make mem_cgroup's charge to be 0 if there is no task. | 3694 | * make mem_cgroup's charge to be 0 if there is no task. |
3692 | * This enables deleting this mem_cgroup. | 3695 | * This enables deleting this mem_cgroup. |
3693 | */ | 3696 | */ |
3694 | static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) | 3697 | static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all) |
3695 | { | 3698 | { |
3696 | int ret; | 3699 | int ret; |
3697 | int node, zid, shrink; | 3700 | int node, zid, shrink; |
3698 | int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; | 3701 | int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; |
3699 | struct cgroup *cgrp = mem->css.cgroup; | 3702 | struct cgroup *cgrp = memcg->css.cgroup; |
3700 | 3703 | ||
3701 | css_get(&mem->css); | 3704 | css_get(&memcg->css); |
3702 | 3705 | ||
3703 | shrink = 0; | 3706 | shrink = 0; |
3704 | /* should free all ? */ | 3707 | /* should free all ? */ |
@@ -3714,14 +3717,14 @@ move_account: | |||
3714 | goto out; | 3717 | goto out; |
3715 | /* This is for making all *used* pages to be on LRU. */ | 3718 | /* This is for making all *used* pages to be on LRU. */ |
3716 | lru_add_drain_all(); | 3719 | lru_add_drain_all(); |
3717 | drain_all_stock_sync(mem); | 3720 | drain_all_stock_sync(memcg); |
3718 | ret = 0; | 3721 | ret = 0; |
3719 | mem_cgroup_start_move(mem); | 3722 | mem_cgroup_start_move(memcg); |
3720 | for_each_node_state(node, N_HIGH_MEMORY) { | 3723 | for_each_node_state(node, N_HIGH_MEMORY) { |
3721 | for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { | 3724 | for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { |
3722 | enum lru_list l; | 3725 | enum lru_list l; |
3723 | for_each_lru(l) { | 3726 | for_each_lru(l) { |
3724 | ret = mem_cgroup_force_empty_list(mem, | 3727 | ret = mem_cgroup_force_empty_list(memcg, |
3725 | node, zid, l); | 3728 | node, zid, l); |
3726 | if (ret) | 3729 | if (ret) |
3727 | break; | 3730 | break; |
@@ -3730,16 +3733,16 @@ move_account: | |||
3730 | if (ret) | 3733 | if (ret) |
3731 | break; | 3734 | break; |
3732 | } | 3735 | } |
3733 | mem_cgroup_end_move(mem); | 3736 | mem_cgroup_end_move(memcg); |
3734 | memcg_oom_recover(mem); | 3737 | memcg_oom_recover(memcg); |
3735 | /* it seems parent cgroup doesn't have enough mem */ | 3738 | /* it seems parent cgroup doesn't have enough mem */ |
3736 | if (ret == -ENOMEM) | 3739 | if (ret == -ENOMEM) |
3737 | goto try_to_free; | 3740 | goto try_to_free; |
3738 | cond_resched(); | 3741 | cond_resched(); |
3739 | /* "ret" should also be checked to ensure all lists are empty. */ | 3742 | /* "ret" should also be checked to ensure all lists are empty. */ |
3740 | } while (mem->res.usage > 0 || ret); | 3743 | } while (memcg->res.usage > 0 || ret); |
3741 | out: | 3744 | out: |
3742 | css_put(&mem->css); | 3745 | css_put(&memcg->css); |
3743 | return ret; | 3746 | return ret; |
3744 | 3747 | ||
3745 | try_to_free: | 3748 | try_to_free: |
@@ -3752,14 +3755,14 @@ try_to_free: | |||
3752 | lru_add_drain_all(); | 3755 | lru_add_drain_all(); |
3753 | /* try to free all pages in this cgroup */ | 3756 | /* try to free all pages in this cgroup */ |
3754 | shrink = 1; | 3757 | shrink = 1; |
3755 | while (nr_retries && mem->res.usage > 0) { | 3758 | while (nr_retries && memcg->res.usage > 0) { |
3756 | int progress; | 3759 | int progress; |
3757 | 3760 | ||
3758 | if (signal_pending(current)) { | 3761 | if (signal_pending(current)) { |
3759 | ret = -EINTR; | 3762 | ret = -EINTR; |
3760 | goto out; | 3763 | goto out; |
3761 | } | 3764 | } |
3762 | progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, | 3765 | progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, |
3763 | false); | 3766 | false); |
3764 | if (!progress) { | 3767 | if (!progress) { |
3765 | nr_retries--; | 3768 | nr_retries--; |
@@ -3788,12 +3791,12 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, | |||
3788 | u64 val) | 3791 | u64 val) |
3789 | { | 3792 | { |
3790 | int retval = 0; | 3793 | int retval = 0; |
3791 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 3794 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
3792 | struct cgroup *parent = cont->parent; | 3795 | struct cgroup *parent = cont->parent; |
3793 | struct mem_cgroup *parent_mem = NULL; | 3796 | struct mem_cgroup *parent_memcg = NULL; |
3794 | 3797 | ||
3795 | if (parent) | 3798 | if (parent) |
3796 | parent_mem = mem_cgroup_from_cont(parent); | 3799 | parent_memcg = mem_cgroup_from_cont(parent); |
3797 | 3800 | ||
3798 | cgroup_lock(); | 3801 | cgroup_lock(); |
3799 | /* | 3802 | /* |
@@ -3804,10 +3807,10 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, | |||
3804 | * For the root cgroup, parent_mem is NULL, we allow value to be | 3807 | * For the root cgroup, parent_mem is NULL, we allow value to be |
3805 | * set if there are no children. | 3808 | * set if there are no children. |
3806 | */ | 3809 | */ |
3807 | if ((!parent_mem || !parent_mem->use_hierarchy) && | 3810 | if ((!parent_memcg || !parent_memcg->use_hierarchy) && |
3808 | (val == 1 || val == 0)) { | 3811 | (val == 1 || val == 0)) { |
3809 | if (list_empty(&cont->children)) | 3812 | if (list_empty(&cont->children)) |
3810 | mem->use_hierarchy = val; | 3813 | memcg->use_hierarchy = val; |
3811 | else | 3814 | else |
3812 | retval = -EBUSY; | 3815 | retval = -EBUSY; |
3813 | } else | 3816 | } else |
@@ -3818,14 +3821,14 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, | |||
3818 | } | 3821 | } |
3819 | 3822 | ||
3820 | 3823 | ||
3821 | static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, | 3824 | static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, |
3822 | enum mem_cgroup_stat_index idx) | 3825 | enum mem_cgroup_stat_index idx) |
3823 | { | 3826 | { |
3824 | struct mem_cgroup *iter; | 3827 | struct mem_cgroup *iter; |
3825 | long val = 0; | 3828 | long val = 0; |
3826 | 3829 | ||
3827 | /* Per-cpu values can be negative, use a signed accumulator */ | 3830 | /* Per-cpu values can be negative, use a signed accumulator */ |
3828 | for_each_mem_cgroup_tree(iter, mem) | 3831 | for_each_mem_cgroup_tree(iter, memcg) |
3829 | val += mem_cgroup_read_stat(iter, idx); | 3832 | val += mem_cgroup_read_stat(iter, idx); |
3830 | 3833 | ||
3831 | if (val < 0) /* race ? */ | 3834 | if (val < 0) /* race ? */ |
@@ -3833,29 +3836,29 @@ static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem, | |||
3833 | return val; | 3836 | return val; |
3834 | } | 3837 | } |
3835 | 3838 | ||
3836 | static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) | 3839 | static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) |
3837 | { | 3840 | { |
3838 | u64 val; | 3841 | u64 val; |
3839 | 3842 | ||
3840 | if (!mem_cgroup_is_root(mem)) { | 3843 | if (!mem_cgroup_is_root(memcg)) { |
3841 | if (!swap) | 3844 | if (!swap) |
3842 | return res_counter_read_u64(&mem->res, RES_USAGE); | 3845 | return res_counter_read_u64(&memcg->res, RES_USAGE); |
3843 | else | 3846 | else |
3844 | return res_counter_read_u64(&mem->memsw, RES_USAGE); | 3847 | return res_counter_read_u64(&memcg->memsw, RES_USAGE); |
3845 | } | 3848 | } |
3846 | 3849 | ||
3847 | val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE); | 3850 | val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); |
3848 | val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS); | 3851 | val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); |
3849 | 3852 | ||
3850 | if (swap) | 3853 | if (swap) |
3851 | val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT); | 3854 | val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); |
3852 | 3855 | ||
3853 | return val << PAGE_SHIFT; | 3856 | return val << PAGE_SHIFT; |
3854 | } | 3857 | } |
3855 | 3858 | ||
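mem_cgroup_usage() above has two cases: an ordinary cgroup simply reads its res or memsw counter, while the root cgroup (whose counters are never charged) rebuilds usage by summing cache and RSS statistics over the whole hierarchy and shifting pages into bytes. A compact model of that split; memcg_stats and its fields are invented for the sketch:

    #include <stdbool.h>

    #define PAGE_SHIFT 12                     /* 4 KiB pages, for the sketch */

    struct memcg_stats {
        bool is_root;
        unsigned long long counter_usage;     /* res/memsw counter, in bytes */
        unsigned long long cache_pages;       /* recursive MEM_CGROUP_STAT_CACHE */
        unsigned long long rss_pages;         /* recursive MEM_CGROUP_STAT_RSS */
        unsigned long long swap_pages;        /* recursive MEM_CGROUP_STAT_SWAPOUT */
    };

    static unsigned long long memcg_usage(const struct memcg_stats *s, bool swap)
    {
        unsigned long long pages;

        if (!s->is_root)
            return s->counter_usage;          /* charged counters are exact */

        /* root is never charged: rebuild usage from the recursive statistics */
        pages = s->cache_pages + s->rss_pages;
        if (swap)
            pages += s->swap_pages;
        return pages << PAGE_SHIFT;           /* pages -> bytes */
    }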
3856 | static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) | 3859 | static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) |
3857 | { | 3860 | { |
3858 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 3861 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
3859 | u64 val; | 3862 | u64 val; |
3860 | int type, name; | 3863 | int type, name; |
3861 | 3864 | ||
@@ -3864,15 +3867,15 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) | |||
3864 | switch (type) { | 3867 | switch (type) { |
3865 | case _MEM: | 3868 | case _MEM: |
3866 | if (name == RES_USAGE) | 3869 | if (name == RES_USAGE) |
3867 | val = mem_cgroup_usage(mem, false); | 3870 | val = mem_cgroup_usage(memcg, false); |
3868 | else | 3871 | else |
3869 | val = res_counter_read_u64(&mem->res, name); | 3872 | val = res_counter_read_u64(&memcg->res, name); |
3870 | break; | 3873 | break; |
3871 | case _MEMSWAP: | 3874 | case _MEMSWAP: |
3872 | if (name == RES_USAGE) | 3875 | if (name == RES_USAGE) |
3873 | val = mem_cgroup_usage(mem, true); | 3876 | val = mem_cgroup_usage(memcg, true); |
3874 | else | 3877 | else |
3875 | val = res_counter_read_u64(&mem->memsw, name); | 3878 | val = res_counter_read_u64(&memcg->memsw, name); |
3876 | break; | 3879 | break; |
3877 | default: | 3880 | default: |
3878 | BUG(); | 3881 | BUG(); |
@@ -3960,24 +3963,24 @@ out: | |||
3960 | 3963 | ||
3961 | static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) | 3964 | static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) |
3962 | { | 3965 | { |
3963 | struct mem_cgroup *mem; | 3966 | struct mem_cgroup *memcg; |
3964 | int type, name; | 3967 | int type, name; |
3965 | 3968 | ||
3966 | mem = mem_cgroup_from_cont(cont); | 3969 | memcg = mem_cgroup_from_cont(cont); |
3967 | type = MEMFILE_TYPE(event); | 3970 | type = MEMFILE_TYPE(event); |
3968 | name = MEMFILE_ATTR(event); | 3971 | name = MEMFILE_ATTR(event); |
3969 | switch (name) { | 3972 | switch (name) { |
3970 | case RES_MAX_USAGE: | 3973 | case RES_MAX_USAGE: |
3971 | if (type == _MEM) | 3974 | if (type == _MEM) |
3972 | res_counter_reset_max(&mem->res); | 3975 | res_counter_reset_max(&memcg->res); |
3973 | else | 3976 | else |
3974 | res_counter_reset_max(&mem->memsw); | 3977 | res_counter_reset_max(&memcg->memsw); |
3975 | break; | 3978 | break; |
3976 | case RES_FAILCNT: | 3979 | case RES_FAILCNT: |
3977 | if (type == _MEM) | 3980 | if (type == _MEM) |
3978 | res_counter_reset_failcnt(&mem->res); | 3981 | res_counter_reset_failcnt(&memcg->res); |
3979 | else | 3982 | else |
3980 | res_counter_reset_failcnt(&mem->memsw); | 3983 | res_counter_reset_failcnt(&memcg->memsw); |
3981 | break; | 3984 | break; |
3982 | } | 3985 | } |
3983 | 3986 | ||
@@ -3994,7 +3997,7 @@ static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, | |||
3994 | static int mem_cgroup_move_charge_write(struct cgroup *cgrp, | 3997 | static int mem_cgroup_move_charge_write(struct cgroup *cgrp, |
3995 | struct cftype *cft, u64 val) | 3998 | struct cftype *cft, u64 val) |
3996 | { | 3999 | { |
3997 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 4000 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); |
3998 | 4001 | ||
3999 | if (val >= (1 << NR_MOVE_TYPE)) | 4002 | if (val >= (1 << NR_MOVE_TYPE)) |
4000 | return -EINVAL; | 4003 | return -EINVAL; |
@@ -4004,7 +4007,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp, | |||
4004 | * inconsistent. | 4007 | * inconsistent. |
4005 | */ | 4008 | */ |
4006 | cgroup_lock(); | 4009 | cgroup_lock(); |
4007 | mem->move_charge_at_immigrate = val; | 4010 | memcg->move_charge_at_immigrate = val; |
4008 | cgroup_unlock(); | 4011 | cgroup_unlock(); |
4009 | 4012 | ||
4010 | return 0; | 4013 | return 0; |
@@ -4061,49 +4064,49 @@ struct { | |||
4061 | 4064 | ||
4062 | 4065 | ||
4063 | static void | 4066 | static void |
4064 | mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | 4067 | mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) |
4065 | { | 4068 | { |
4066 | s64 val; | 4069 | s64 val; |
4067 | 4070 | ||
4068 | /* per cpu stat */ | 4071 | /* per cpu stat */ |
4069 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); | 4072 | val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE); |
4070 | s->stat[MCS_CACHE] += val * PAGE_SIZE; | 4073 | s->stat[MCS_CACHE] += val * PAGE_SIZE; |
4071 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); | 4074 | val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS); |
4072 | s->stat[MCS_RSS] += val * PAGE_SIZE; | 4075 | s->stat[MCS_RSS] += val * PAGE_SIZE; |
4073 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); | 4076 | val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); |
4074 | s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; | 4077 | s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; |
4075 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN); | 4078 | val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN); |
4076 | s->stat[MCS_PGPGIN] += val; | 4079 | s->stat[MCS_PGPGIN] += val; |
4077 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT); | 4080 | val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT); |
4078 | s->stat[MCS_PGPGOUT] += val; | 4081 | s->stat[MCS_PGPGOUT] += val; |
4079 | if (do_swap_account) { | 4082 | if (do_swap_account) { |
4080 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); | 4083 | val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT); |
4081 | s->stat[MCS_SWAP] += val * PAGE_SIZE; | 4084 | s->stat[MCS_SWAP] += val * PAGE_SIZE; |
4082 | } | 4085 | } |
4083 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT); | 4086 | val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT); |
4084 | s->stat[MCS_PGFAULT] += val; | 4087 | s->stat[MCS_PGFAULT] += val; |
4085 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT); | 4088 | val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT); |
4086 | s->stat[MCS_PGMAJFAULT] += val; | 4089 | s->stat[MCS_PGMAJFAULT] += val; |
4087 | 4090 | ||
4088 | /* per zone stat */ | 4091 | /* per zone stat */ |
4089 | val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON)); | 4092 | val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON)); |
4090 | s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; | 4093 | s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; |
4091 | val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON)); | 4094 | val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON)); |
4092 | s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; | 4095 | s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; |
4093 | val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE)); | 4096 | val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE)); |
4094 | s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; | 4097 | s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; |
4095 | val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE)); | 4098 | val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE)); |
4096 | s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; | 4099 | s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; |
4097 | val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE)); | 4100 | val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); |
4098 | s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; | 4101 | s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; |
4099 | } | 4102 | } |
4100 | 4103 | ||
4101 | static void | 4104 | static void |
4102 | mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | 4105 | mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s) |
4103 | { | 4106 | { |
4104 | struct mem_cgroup *iter; | 4107 | struct mem_cgroup *iter; |
4105 | 4108 | ||
4106 | for_each_mem_cgroup_tree(iter, mem) | 4109 | for_each_mem_cgroup_tree(iter, memcg) |
4107 | mem_cgroup_get_local_stat(iter, s); | 4110 | mem_cgroup_get_local_stat(iter, s); |
4108 | } | 4111 | } |
4109 | 4112 | ||
@@ -4327,20 +4330,20 @@ static int compare_thresholds(const void *a, const void *b) | |||
4327 | return _a->threshold - _b->threshold; | 4330 | return _a->threshold - _b->threshold; |
4328 | } | 4331 | } |
4329 | 4332 | ||
4330 | static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) | 4333 | static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) |
4331 | { | 4334 | { |
4332 | struct mem_cgroup_eventfd_list *ev; | 4335 | struct mem_cgroup_eventfd_list *ev; |
4333 | 4336 | ||
4334 | list_for_each_entry(ev, &mem->oom_notify, list) | 4337 | list_for_each_entry(ev, &memcg->oom_notify, list) |
4335 | eventfd_signal(ev->eventfd, 1); | 4338 | eventfd_signal(ev->eventfd, 1); |
4336 | return 0; | 4339 | return 0; |
4337 | } | 4340 | } |
4338 | 4341 | ||
4339 | static void mem_cgroup_oom_notify(struct mem_cgroup *mem) | 4342 | static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) |
4340 | { | 4343 | { |
4341 | struct mem_cgroup *iter; | 4344 | struct mem_cgroup *iter; |
4342 | 4345 | ||
4343 | for_each_mem_cgroup_tree(iter, mem) | 4346 | for_each_mem_cgroup_tree(iter, memcg) |
4344 | mem_cgroup_oom_notify_cb(iter); | 4347 | mem_cgroup_oom_notify_cb(iter); |
4345 | } | 4348 | } |
4346 | 4349 | ||
@@ -4530,7 +4533,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp, | |||
4530 | static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | 4533 | static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, |
4531 | struct cftype *cft, struct eventfd_ctx *eventfd) | 4534 | struct cftype *cft, struct eventfd_ctx *eventfd) |
4532 | { | 4535 | { |
4533 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 4536 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); |
4534 | struct mem_cgroup_eventfd_list *ev, *tmp; | 4537 | struct mem_cgroup_eventfd_list *ev, *tmp; |
4535 | int type = MEMFILE_TYPE(cft->private); | 4538 | int type = MEMFILE_TYPE(cft->private); |
4536 | 4539 | ||
@@ -4538,7 +4541,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | |||
4538 | 4541 | ||
4539 | spin_lock(&memcg_oom_lock); | 4542 | spin_lock(&memcg_oom_lock); |
4540 | 4543 | ||
4541 | list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { | 4544 | list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { |
4542 | if (ev->eventfd == eventfd) { | 4545 | if (ev->eventfd == eventfd) { |
4543 | list_del(&ev->list); | 4546 | list_del(&ev->list); |
4544 | kfree(ev); | 4547 | kfree(ev); |
@@ -4551,11 +4554,11 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | |||
4551 | static int mem_cgroup_oom_control_read(struct cgroup *cgrp, | 4554 | static int mem_cgroup_oom_control_read(struct cgroup *cgrp, |
4552 | struct cftype *cft, struct cgroup_map_cb *cb) | 4555 | struct cftype *cft, struct cgroup_map_cb *cb) |
4553 | { | 4556 | { |
4554 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 4557 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); |
4555 | 4558 | ||
4556 | cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable); | 4559 | cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable); |
4557 | 4560 | ||
4558 | if (atomic_read(&mem->under_oom)) | 4561 | if (atomic_read(&memcg->under_oom)) |
4559 | cb->fill(cb, "under_oom", 1); | 4562 | cb->fill(cb, "under_oom", 1); |
4560 | else | 4563 | else |
4561 | cb->fill(cb, "under_oom", 0); | 4564 | cb->fill(cb, "under_oom", 0); |
@@ -4565,7 +4568,7 @@ static int mem_cgroup_oom_control_read(struct cgroup *cgrp, | |||
4565 | static int mem_cgroup_oom_control_write(struct cgroup *cgrp, | 4568 | static int mem_cgroup_oom_control_write(struct cgroup *cgrp, |
4566 | struct cftype *cft, u64 val) | 4569 | struct cftype *cft, u64 val) |
4567 | { | 4570 | { |
4568 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 4571 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); |
4569 | struct mem_cgroup *parent; | 4572 | struct mem_cgroup *parent; |
4570 | 4573 | ||
4571 | /* cannot set to root cgroup and only 0 and 1 are allowed */ | 4574 | /* cannot set to root cgroup and only 0 and 1 are allowed */ |
@@ -4577,13 +4580,13 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp, | |||
4577 | cgroup_lock(); | 4580 | cgroup_lock(); |
4578 | /* oom-kill-disable is a flag for subhierarchy. */ | 4581 | /* oom-kill-disable is a flag for subhierarchy. */ |
4579 | if ((parent->use_hierarchy) || | 4582 | if ((parent->use_hierarchy) || |
4580 | (mem->use_hierarchy && !list_empty(&cgrp->children))) { | 4583 | (memcg->use_hierarchy && !list_empty(&cgrp->children))) { |
4581 | cgroup_unlock(); | 4584 | cgroup_unlock(); |
4582 | return -EINVAL; | 4585 | return -EINVAL; |
4583 | } | 4586 | } |
4584 | mem->oom_kill_disable = val; | 4587 | memcg->oom_kill_disable = val; |
4585 | if (!val) | 4588 | if (!val) |
4586 | memcg_oom_recover(mem); | 4589 | memcg_oom_recover(memcg); |
4587 | cgroup_unlock(); | 4590 | cgroup_unlock(); |
4588 | return 0; | 4591 | return 0; |
4589 | } | 4592 | } |
@@ -4719,7 +4722,7 @@ static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) | |||
4719 | } | 4722 | } |
4720 | #endif | 4723 | #endif |
4721 | 4724 | ||
4722 | static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 4725 | static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) |
4723 | { | 4726 | { |
4724 | struct mem_cgroup_per_node *pn; | 4727 | struct mem_cgroup_per_node *pn; |
4725 | struct mem_cgroup_per_zone *mz; | 4728 | struct mem_cgroup_per_zone *mz; |
@@ -4739,21 +4742,21 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | |||
4739 | if (!pn) | 4742 | if (!pn) |
4740 | return 1; | 4743 | return 1; |
4741 | 4744 | ||
4742 | mem->info.nodeinfo[node] = pn; | 4745 | memcg->info.nodeinfo[node] = pn; |
4743 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 4746 | for (zone = 0; zone < MAX_NR_ZONES; zone++) { |
4744 | mz = &pn->zoneinfo[zone]; | 4747 | mz = &pn->zoneinfo[zone]; |
4745 | for_each_lru(l) | 4748 | for_each_lru(l) |
4746 | INIT_LIST_HEAD(&mz->lists[l]); | 4749 | INIT_LIST_HEAD(&mz->lists[l]); |
4747 | mz->usage_in_excess = 0; | 4750 | mz->usage_in_excess = 0; |
4748 | mz->on_tree = false; | 4751 | mz->on_tree = false; |
4749 | mz->mem = mem; | 4752 | mz->mem = memcg; |
4750 | } | 4753 | } |
4751 | return 0; | 4754 | return 0; |
4752 | } | 4755 | } |
4753 | 4756 | ||
4754 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 4757 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) |
4755 | { | 4758 | { |
4756 | kfree(mem->info.nodeinfo[node]); | 4759 | kfree(memcg->info.nodeinfo[node]); |
4757 | } | 4760 | } |
4758 | 4761 | ||
4759 | static struct mem_cgroup *mem_cgroup_alloc(void) | 4762 | static struct mem_cgroup *mem_cgroup_alloc(void) |
@@ -4795,51 +4798,51 @@ out_free: | |||
4795 | * Removal of cgroup itself succeeds regardless of refs from swap. | 4798 | * Removal of cgroup itself succeeds regardless of refs from swap. |
4796 | */ | 4799 | */ |
4797 | 4800 | ||
4798 | static void __mem_cgroup_free(struct mem_cgroup *mem) | 4801 | static void __mem_cgroup_free(struct mem_cgroup *memcg) |
4799 | { | 4802 | { |
4800 | int node; | 4803 | int node; |
4801 | 4804 | ||
4802 | mem_cgroup_remove_from_trees(mem); | 4805 | mem_cgroup_remove_from_trees(memcg); |
4803 | free_css_id(&mem_cgroup_subsys, &mem->css); | 4806 | free_css_id(&mem_cgroup_subsys, &memcg->css); |
4804 | 4807 | ||
4805 | for_each_node_state(node, N_POSSIBLE) | 4808 | for_each_node_state(node, N_POSSIBLE) |
4806 | free_mem_cgroup_per_zone_info(mem, node); | 4809 | free_mem_cgroup_per_zone_info(memcg, node); |
4807 | 4810 | ||
4808 | free_percpu(mem->stat); | 4811 | free_percpu(memcg->stat); |
4809 | if (sizeof(struct mem_cgroup) < PAGE_SIZE) | 4812 | if (sizeof(struct mem_cgroup) < PAGE_SIZE) |
4810 | kfree(mem); | 4813 | kfree(memcg); |
4811 | else | 4814 | else |
4812 | vfree(mem); | 4815 | vfree(memcg); |
4813 | } | 4816 | } |
4814 | 4817 | ||
4815 | static void mem_cgroup_get(struct mem_cgroup *mem) | 4818 | static void mem_cgroup_get(struct mem_cgroup *memcg) |
4816 | { | 4819 | { |
4817 | atomic_inc(&mem->refcnt); | 4820 | atomic_inc(&memcg->refcnt); |
4818 | } | 4821 | } |
4819 | 4822 | ||
4820 | static void __mem_cgroup_put(struct mem_cgroup *mem, int count) | 4823 | static void __mem_cgroup_put(struct mem_cgroup *memcg, int count) |
4821 | { | 4824 | { |
4822 | if (atomic_sub_and_test(count, &mem->refcnt)) { | 4825 | if (atomic_sub_and_test(count, &memcg->refcnt)) { |
4823 | struct mem_cgroup *parent = parent_mem_cgroup(mem); | 4826 | struct mem_cgroup *parent = parent_mem_cgroup(memcg); |
4824 | __mem_cgroup_free(mem); | 4827 | __mem_cgroup_free(memcg); |
4825 | if (parent) | 4828 | if (parent) |
4826 | mem_cgroup_put(parent); | 4829 | mem_cgroup_put(parent); |
4827 | } | 4830 | } |
4828 | } | 4831 | } |
4829 | 4832 | ||
4830 | static void mem_cgroup_put(struct mem_cgroup *mem) | 4833 | static void mem_cgroup_put(struct mem_cgroup *memcg) |
4831 | { | 4834 | { |
4832 | __mem_cgroup_put(mem, 1); | 4835 | __mem_cgroup_put(memcg, 1); |
4833 | } | 4836 | } |
4834 | 4837 | ||
4835 | /* | 4838 | /* |
4836 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. | 4839 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. |
4837 | */ | 4840 | */ |
4838 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) | 4841 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) |
4839 | { | 4842 | { |
4840 | if (!mem->res.parent) | 4843 | if (!memcg->res.parent) |
4841 | return NULL; | 4844 | return NULL; |
4842 | return mem_cgroup_from_res_counter(mem->res.parent, res); | 4845 | return mem_cgroup_from_res_counter(memcg->res.parent, res); |
4843 | } | 4846 | } |
4844 | 4847 | ||
4845 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4848 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
@@ -4882,16 +4885,16 @@ static int mem_cgroup_soft_limit_tree_init(void) | |||
4882 | static struct cgroup_subsys_state * __ref | 4885 | static struct cgroup_subsys_state * __ref |
4883 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 4886 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) |
4884 | { | 4887 | { |
4885 | struct mem_cgroup *mem, *parent; | 4888 | struct mem_cgroup *memcg, *parent; |
4886 | long error = -ENOMEM; | 4889 | long error = -ENOMEM; |
4887 | int node; | 4890 | int node; |
4888 | 4891 | ||
4889 | mem = mem_cgroup_alloc(); | 4892 | memcg = mem_cgroup_alloc(); |
4890 | if (!mem) | 4893 | if (!memcg) |
4891 | return ERR_PTR(error); | 4894 | return ERR_PTR(error); |
4892 | 4895 | ||
4893 | for_each_node_state(node, N_POSSIBLE) | 4896 | for_each_node_state(node, N_POSSIBLE) |
4894 | if (alloc_mem_cgroup_per_zone_info(mem, node)) | 4897 | if (alloc_mem_cgroup_per_zone_info(memcg, node)) |
4895 | goto free_out; | 4898 | goto free_out; |
4896 | 4899 | ||
4897 | /* root ? */ | 4900 | /* root ? */ |
@@ -4899,7 +4902,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
4899 | int cpu; | 4902 | int cpu; |
4900 | enable_swap_cgroup(); | 4903 | enable_swap_cgroup(); |
4901 | parent = NULL; | 4904 | parent = NULL; |
4902 | root_mem_cgroup = mem; | 4905 | root_mem_cgroup = memcg; |
4903 | if (mem_cgroup_soft_limit_tree_init()) | 4906 | if (mem_cgroup_soft_limit_tree_init()) |
4904 | goto free_out; | 4907 | goto free_out; |
4905 | for_each_possible_cpu(cpu) { | 4908 | for_each_possible_cpu(cpu) { |
@@ -4910,13 +4913,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
4910 | hotcpu_notifier(memcg_cpu_hotplug_callback, 0); | 4913 | hotcpu_notifier(memcg_cpu_hotplug_callback, 0); |
4911 | } else { | 4914 | } else { |
4912 | parent = mem_cgroup_from_cont(cont->parent); | 4915 | parent = mem_cgroup_from_cont(cont->parent); |
4913 | mem->use_hierarchy = parent->use_hierarchy; | 4916 | memcg->use_hierarchy = parent->use_hierarchy; |
4914 | mem->oom_kill_disable = parent->oom_kill_disable; | 4917 | memcg->oom_kill_disable = parent->oom_kill_disable; |
4915 | } | 4918 | } |
4916 | 4919 | ||
4917 | if (parent && parent->use_hierarchy) { | 4920 | if (parent && parent->use_hierarchy) { |
4918 | res_counter_init(&mem->res, &parent->res); | 4921 | res_counter_init(&memcg->res, &parent->res); |
4919 | res_counter_init(&mem->memsw, &parent->memsw); | 4922 | res_counter_init(&memcg->memsw, &parent->memsw); |
4920 | /* | 4923 | /* |
4921 | * We increment refcnt of the parent to ensure that we can | 4924 | * We increment refcnt of the parent to ensure that we can |
4922 | * safely access it on res_counter_charge/uncharge. | 4925 | * safely access it on res_counter_charge/uncharge. |
@@ -4925,21 +4928,21 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
4925 | */ | 4928 | */ |
4926 | mem_cgroup_get(parent); | 4929 | mem_cgroup_get(parent); |
4927 | } else { | 4930 | } else { |
4928 | res_counter_init(&mem->res, NULL); | 4931 | res_counter_init(&memcg->res, NULL); |
4929 | res_counter_init(&mem->memsw, NULL); | 4932 | res_counter_init(&memcg->memsw, NULL); |
4930 | } | 4933 | } |
4931 | mem->last_scanned_child = 0; | 4934 | memcg->last_scanned_child = 0; |
4932 | mem->last_scanned_node = MAX_NUMNODES; | 4935 | memcg->last_scanned_node = MAX_NUMNODES; |
4933 | INIT_LIST_HEAD(&mem->oom_notify); | 4936 | INIT_LIST_HEAD(&memcg->oom_notify); |
4934 | 4937 | ||
4935 | if (parent) | 4938 | if (parent) |
4936 | mem->swappiness = mem_cgroup_swappiness(parent); | 4939 | memcg->swappiness = mem_cgroup_swappiness(parent); |
4937 | atomic_set(&mem->refcnt, 1); | 4940 | atomic_set(&memcg->refcnt, 1); |
4938 | mem->move_charge_at_immigrate = 0; | 4941 | memcg->move_charge_at_immigrate = 0; |
4939 | mutex_init(&mem->thresholds_lock); | 4942 | mutex_init(&memcg->thresholds_lock); |
4940 | return &mem->css; | 4943 | return &memcg->css; |
4941 | free_out: | 4944 | free_out: |
4942 | __mem_cgroup_free(mem); | 4945 | __mem_cgroup_free(memcg); |
4943 | root_mem_cgroup = NULL; | 4946 | root_mem_cgroup = NULL; |
4944 | return ERR_PTR(error); | 4947 | return ERR_PTR(error); |
4945 | } | 4948 | } |
@@ -4947,17 +4950,17 @@ free_out: | |||
4947 | static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, | 4950 | static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, |
4948 | struct cgroup *cont) | 4951 | struct cgroup *cont) |
4949 | { | 4952 | { |
4950 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 4953 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
4951 | 4954 | ||
4952 | return mem_cgroup_force_empty(mem, false); | 4955 | return mem_cgroup_force_empty(memcg, false); |
4953 | } | 4956 | } |
4954 | 4957 | ||
4955 | static void mem_cgroup_destroy(struct cgroup_subsys *ss, | 4958 | static void mem_cgroup_destroy(struct cgroup_subsys *ss, |
4956 | struct cgroup *cont) | 4959 | struct cgroup *cont) |
4957 | { | 4960 | { |
4958 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 4961 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
4959 | 4962 | ||
4960 | mem_cgroup_put(mem); | 4963 | mem_cgroup_put(memcg); |
4961 | } | 4964 | } |
4962 | 4965 | ||
4963 | static int mem_cgroup_populate(struct cgroup_subsys *ss, | 4966 | static int mem_cgroup_populate(struct cgroup_subsys *ss, |
@@ -4980,9 +4983,9 @@ static int mem_cgroup_do_precharge(unsigned long count) | |||
4980 | { | 4983 | { |
4981 | int ret = 0; | 4984 | int ret = 0; |
4982 | int batch_count = PRECHARGE_COUNT_AT_ONCE; | 4985 | int batch_count = PRECHARGE_COUNT_AT_ONCE; |
4983 | struct mem_cgroup *mem = mc.to; | 4986 | struct mem_cgroup *memcg = mc.to; |
4984 | 4987 | ||
4985 | if (mem_cgroup_is_root(mem)) { | 4988 | if (mem_cgroup_is_root(memcg)) { |
4986 | mc.precharge += count; | 4989 | mc.precharge += count; |
4987 | /* we don't need css_get for root */ | 4990 | /* we don't need css_get for root */ |
4988 | return ret; | 4991 | return ret; |
@@ -4991,16 +4994,16 @@ static int mem_cgroup_do_precharge(unsigned long count) | |||
4991 | if (count > 1) { | 4994 | if (count > 1) { |
4992 | struct res_counter *dummy; | 4995 | struct res_counter *dummy; |
4993 | /* | 4996 | /* |
4994 | * "mem" cannot be under rmdir() because we've already checked | 4997 | * "memcg" cannot be under rmdir() because we've already checked |
4995 | * by cgroup_lock_live_cgroup() that it is not removed and we | 4998 | * by cgroup_lock_live_cgroup() that it is not removed and we |
4996 | * are still under the same cgroup_mutex. So we can postpone | 4999 | * are still under the same cgroup_mutex. So we can postpone |
4997 | * css_get(). | 5000 | * css_get(). |
4998 | */ | 5001 | */ |
4999 | if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) | 5002 | if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy)) |
5000 | goto one_by_one; | 5003 | goto one_by_one; |
5001 | if (do_swap_account && res_counter_charge(&mem->memsw, | 5004 | if (do_swap_account && res_counter_charge(&memcg->memsw, |
5002 | PAGE_SIZE * count, &dummy)) { | 5005 | PAGE_SIZE * count, &dummy)) { |
5003 | res_counter_uncharge(&mem->res, PAGE_SIZE * count); | 5006 | res_counter_uncharge(&memcg->res, PAGE_SIZE * count); |
5004 | goto one_by_one; | 5007 | goto one_by_one; |
5005 | } | 5008 | } |
5006 | mc.precharge += count; | 5009 | mc.precharge += count; |
@@ -5017,8 +5020,9 @@ one_by_one: | |||
5017 | batch_count = PRECHARGE_COUNT_AT_ONCE; | 5020 | batch_count = PRECHARGE_COUNT_AT_ONCE; |
5018 | cond_resched(); | 5021 | cond_resched(); |
5019 | } | 5022 | } |
5020 | ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false); | 5023 | ret = __mem_cgroup_try_charge(NULL, |
5021 | if (ret || !mem) | 5024 | GFP_KERNEL, 1, &memcg, false); |
5025 | if (ret || !memcg) | ||
5022 | /* mem_cgroup_clear_mc() will do uncharge later */ | 5026 | /* mem_cgroup_clear_mc() will do uncharge later */ |
5023 | return -ENOMEM; | 5027 | return -ENOMEM; |
5024 | mc.precharge++; | 5028 | mc.precharge++; |
@@ -5292,13 +5296,13 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
5292 | struct task_struct *p) | 5296 | struct task_struct *p) |
5293 | { | 5297 | { |
5294 | int ret = 0; | 5298 | int ret = 0; |
5295 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); | 5299 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup); |
5296 | 5300 | ||
5297 | if (mem->move_charge_at_immigrate) { | 5301 | if (memcg->move_charge_at_immigrate) { |
5298 | struct mm_struct *mm; | 5302 | struct mm_struct *mm; |
5299 | struct mem_cgroup *from = mem_cgroup_from_task(p); | 5303 | struct mem_cgroup *from = mem_cgroup_from_task(p); |
5300 | 5304 | ||
5301 | VM_BUG_ON(from == mem); | 5305 | VM_BUG_ON(from == memcg); |
5302 | 5306 | ||
5303 | mm = get_task_mm(p); | 5307 | mm = get_task_mm(p); |
5304 | if (!mm) | 5308 | if (!mm) |
@@ -5313,7 +5317,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
5313 | mem_cgroup_start_move(from); | 5317 | mem_cgroup_start_move(from); |
5314 | spin_lock(&mc.lock); | 5318 | spin_lock(&mc.lock); |
5315 | mc.from = from; | 5319 | mc.from = from; |
5316 | mc.to = mem; | 5320 | mc.to = memcg; |
5317 | spin_unlock(&mc.lock); | 5321 | spin_unlock(&mc.lock); |
5318 | /* We set mc.moving_task later */ | 5322 | /* We set mc.moving_task later */ |
5319 | 5323 | ||