author          Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:23 -0500
committer       Linus Torvalds <torvalds@linux-foundation.org> 2012-01-12 23:13:05 -0500
commit          f53d7ce32e13dbd09573b176e6521a04c2c77803 (patch)
tree            af88d1c256d06e3d37c5df013de9ff41f86ed4a2 /mm
parent          e94c8a9cbce1aee4af9e1285802785481b7f93c5 (diff)
mm: memcg: shorten preempt-disabled section around event checks
Only the ratelimit checks themselves have to run with preemption
disabled; the resulting actions - checking for usage thresholds,
updating the soft limit tree - can and should run with preemption
enabled.
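The resulting shape of memcg_check_events() is visible in the diff below:
the cheap check-and-advance runs under preempt_disable(), its verdicts are
latched into local booleans, and the expensive work runs after
preempt_enable(). As a standalone illustration of the same pattern, here is
a minimal userspace C sketch (hypothetical names throughout, not the kernel
code; a mutex stands in for preemption disabling):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* The mutex stands in for preempt_disable()/preempt_enable(). */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long events;
    static unsigned long thresh_next, softlimit_next;

    /* Cheap check-and-advance; must run with the lock held. */
    static bool ratelimit(unsigned long *next, unsigned long step)
    {
            if ((long)*next - (long)events < 0) {   /* same trick as time_after() */
                    *next = events + step;
                    return true;
            }
            return false;
    }

    static void check_events(void)
    {
            pthread_mutex_lock(&lock);
            if (ratelimit(&thresh_next, 128)) {
                    /* Latch every decision while still "non-preemptible"... */
                    bool do_softlimit = ratelimit(&softlimit_next, 1024);

                    pthread_mutex_unlock(&lock);

                    /* ...then do the expensive work without the lock. */
                    printf("threshold work\n");
                    if (do_softlimit)
                            printf("soft limit work\n");
            } else {
                    pthread_mutex_unlock(&lock);
            }
    }

    int main(void)
    {
            events = 4096;          /* pretend the event counter ran ahead */
            check_events();         /* prints both work lines */
            return 0;
    }

The latch is what lets the unlock move up: the decisions depend on per-CPU
state that is only stable while preemption is off, but the actions acting on
those decisions do not.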
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Yong Zhang <yong.zhang0@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-by: Luis Henriques <henrix@camandro.org>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--      mm/memcontrol.c         73
1 file changed, 35 insertions(+), 38 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 42174612cc0b..abb66a2cba65 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -748,37 +748,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 	return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
+static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+				       enum mem_cgroup_events_target target)
 {
 	unsigned long val, next;
 
 	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 	next = __this_cpu_read(memcg->stat->targets[target]);
 	/* from time_after() in jiffies.h */
-	return ((long)next - (long)val < 0);
-}
-
-static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
-{
-	unsigned long val, next;
-
-	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-
-	switch (target) {
-	case MEM_CGROUP_TARGET_THRESH:
-		next = val + THRESHOLDS_EVENTS_TARGET;
-		break;
-	case MEM_CGROUP_TARGET_SOFTLIMIT:
-		next = val + SOFTLIMIT_EVENTS_TARGET;
-		break;
-	case MEM_CGROUP_TARGET_NUMAINFO:
-		next = val + NUMAINFO_EVENTS_TARGET;
-		break;
-	default:
-		return;
+	if ((long)next - (long)val < 0) {
+		switch (target) {
+		case MEM_CGROUP_TARGET_THRESH:
+			next = val + THRESHOLDS_EVENTS_TARGET;
+			break;
+		case MEM_CGROUP_TARGET_SOFTLIMIT:
+			next = val + SOFTLIMIT_EVENTS_TARGET;
+			break;
+		case MEM_CGROUP_TARGET_NUMAINFO:
+			next = val + NUMAINFO_EVENTS_TARGET;
+			break;
+		default:
+			break;
+		}
+		__this_cpu_write(memcg->stat->targets[target], next);
+		return true;
 	}
-
-	__this_cpu_write(memcg->stat->targets[target], next);
+	return false;
 }
 
 /*
@@ -789,25 +784,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 {
 	preempt_disable();
 	/* threshold event is triggered in finer grain than soft limit */
-	if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
+	if (unlikely(mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_THRESH))) {
+		bool do_softlimit, do_numainfo;
+
+		do_softlimit = mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_SOFTLIMIT);
+#if MAX_NUMNODES > 1
+		do_numainfo = mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_NUMAINFO);
+#endif
+		preempt_enable();
+
 		mem_cgroup_threshold(memcg);
-		__mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
-		if (unlikely(__memcg_event_check(memcg,
-			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
+		if (unlikely(do_softlimit))
 			mem_cgroup_update_tree(memcg, page);
-			__mem_cgroup_target_update(memcg,
-						   MEM_CGROUP_TARGET_SOFTLIMIT);
-		}
 #if MAX_NUMNODES > 1
-		if (unlikely(__memcg_event_check(memcg,
-			     MEM_CGROUP_TARGET_NUMAINFO))) {
+		if (unlikely(do_numainfo))
 			atomic_inc(&memcg->numainfo_events);
-			__mem_cgroup_target_update(memcg,
-						   MEM_CGROUP_TARGET_NUMAINFO);
-		}
 #endif
-	}
-	preempt_enable();
+	} else
+		preempt_enable();
 }
 
 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)