author		Johannes Weiner <hannes@cmpxchg.org>	2011-03-23 19:42:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-23 22:46:31 -0400
commit		e9f8974f2f559b00c87ccfba67bca3903f913d50 (patch)
tree		d19f8f7d15eb56e7af98c7ad63db8e86d5873fff /mm
parent		7ec99d6213b579a84c85ad37f2aa8ded4857c53c (diff)
memcg: break out event counters from other stats
For the per-cpu cgroup usage counters, which are both incremented and
decremented, it makes sense to use signed types: a single CPU's local value
can go negative during updates even though the sum over all CPUs stays
non-negative.  This is not the case for the only-ever-increasing event
counters.
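To see the asymmetry concretely: a page can be charged on one CPU and
uncharged on another, and each CPU records only its local delta.  A minimal
user-space sketch of that scheme (plain C with arrays standing in for the
real per-cpu counters; not the kernel code):

#include <stdio.h>

#define NR_CPUS 2

static long long usage[NR_CPUS];	/* signed: one CPU's delta can dip below zero */
static unsigned long events[NR_CPUS];	/* unsigned: only ever incremented */

int main(void)
{
	long long total = 0;
	int cpu;

	usage[0] += 1; events[0]++;	/* page charged (pagein) on CPU 0 */
	usage[1] -= 1; events[1]++;	/* same page uncharged (pageout) on CPU 1 */

	printf("cpu 1 local usage: %lld\n", usage[1]);	/* -1, hence the signed type */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += usage[cpu];
	printf("total usage: %lld, total events: %lu\n",
	       total, events[0] + events[1]);		/* 0 and 2 */
	return 0;
}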
All the counters have been signed 64-bit so far, which is wide enough to
count events even with the sign bit wasted.
This patch:
- splits the s64 counters into signed usage counters and unsigned,
  monotonically increasing event counters.
- converts the event counters to 'unsigned long' rather than 'u64',
  matching the type used by the /proc/vmstat event counters.
The next patch narrows the type of the signed usage counters (on 32-bit
machines, that is).
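A condensed sketch of the resulting per-cpu layout (the typedef and array
sizes are stubs so it compiles standalone; the narrowing comment refers to
that next patch and is an assumption, not part of this diff):

#include <stdint.h>

typedef int64_t s64;			/* kernel fixed-width type, stubbed here */
#define MEM_CGROUP_STAT_NSTATS	 6	/* stub; matches enum mem_cgroup_stat_index in the diff */
#define MEM_CGROUP_EVENTS_NSTATS 3	/* stub; matches enum mem_cgroup_events_index in the diff */

/* Signed usage counters next to unsigned, monotonically increasing event
 * counters; the follow-up patch would presumably replace s64 with a native
 * 'long' so 32-bit machines avoid 64-bit per-cpu arithmetic. */
struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
};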
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Greg Thelen <gthelen@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	49
1 file changed, 37 insertions(+), 12 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bc02218eab01..d884f758c0e3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -93,19 +93,22 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
 	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
-	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
-	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
 	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
 	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
-	/* incremented at every pagein/pageout */
-	MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
 	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
-
 	MEM_CGROUP_STAT_NSTATS,
 };

+enum mem_cgroup_events_index {
+	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
+	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
+	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
+	MEM_CGROUP_EVENTS_NSTATS,
+};
+
 struct mem_cgroup_stat_cpu {
 	s64 count[MEM_CGROUP_STAT_NSTATS];
+	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
 };

 /*
@@ -577,6 +580,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 }

+static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
+					    enum mem_cgroup_events_index idx)
+{
+	unsigned long val = 0;
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		val += per_cpu(mem->stat->events[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+	spin_lock(&mem->pcp_counter_lock);
+	val += mem->nocpu_base.events[idx];
+	spin_unlock(&mem->pcp_counter_lock);
+#endif
+	return val;
+}
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 					 bool file, int nr_pages)
 {
@@ -589,13 +608,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,

 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
-		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
+		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 	else {
-		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 		nr_pages = -nr_pages; /* for event */
 	}

-	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
+	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

 	preempt_enable();
 }
@@ -617,9 +636,9 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,

 static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
 {
-	s64 val;
+	unsigned long val;

-	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
+	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);

 	return !(val & ((1 << event_mask_shift) - 1));
 }
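The predicate retyped in this hunk fires when the low event_mask_shift bits
of the running pagein/pageout count are all zero, i.e. once every
2^event_mask_shift events.  A standalone demo of the bit trick (hypothetical
helper name, not kernel code):

#include <stdio.h>

/* Returns true when val is a multiple of 2^shift. */
static int event_check(unsigned long val, int shift)
{
	return !(val & ((1UL << shift) - 1));
}

int main(void)
{
	unsigned long val;

	for (val = 1; val <= 8; val++)	/* with shift 2, fires at 4 and 8 */
		printf("%lu -> %d\n", val, event_check(val, 2));
	return 0;
}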
@@ -1773,6 +1792,12 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
 		per_cpu(mem->stat->count[i], cpu) = 0;
 		mem->nocpu_base.count[i] += x;
 	}
+	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+		unsigned long x = per_cpu(mem->stat->events[i], cpu);
+
+		per_cpu(mem->stat->events[i], cpu) = 0;
+		mem->nocpu_base.events[i] += x;
+	}
 	/* need to clear ON_MOVE value, works as a kind of lock. */
 	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
 	spin_unlock(&mem->pcp_counter_lock);
@@ -3725,9 +3750,9 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
 	s->stat[MCS_RSS] += val * PAGE_SIZE;
 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
 	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
+	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
 	s->stat[MCS_PGPGIN] += val;
-	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
 	s->stat[MCS_PGPGOUT] += val;
 	if (do_swap_account) {
 		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);