author	Martin Hicks <mort@sgi.com>	2005-09-03 18:55:11 -0400
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 03:05:49 -0400
commit	c07e02db76940c75fc92f2f2c9adcdbb09ed70d0 (patch)
tree	9d777784fd5e3658d8db5b01a965d4fc568ceb93 /mm
parent	e070ad49f31155d872d8e96cab2142840993e3c0 (diff)
[PATCH] VM: add page_state info to per-node meminfo
Add page_state info to the per-node meminfo file in sysfs.  This is mostly
just for informational purposes.  The lack of this information was brought
up recently during a discussion regarding pagecache clearing, and I put
this patch together to test out one of the suggestions.  It seems like
interesting info to have, so I'm submitting the patch.

Signed-off-by: Martin Hicks <mort@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
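The hunks below (the diffstat is limited to 'mm') only add the
get_page_state_node() helper; the sysfs side that actually prints the
per-node counters lives in drivers/base/node.c and is not shown here.
As a rough sketch of how a per-node meminfo show routine might consume
the helper, the routine name, buffer handling, and the
nr_dirty/nr_writeback/nr_mapped field names are illustrative
assumptions, not the actual hunk from that file:

#include <linux/kernel.h>	/* scnprintf() */
#include <linux/page-flags.h>	/* struct page_state, get_page_state_node() */

/*
 * Hedged sketch of a per-node consumer.  Only get_page_state_node()
 * comes from this patch; the routine name and the page_state fields
 * printed here are assumptions for illustration.
 */
static int sprint_node_page_state(int nid, char *buf, size_t len)
{
	struct page_state ps;

	/* Sum the per-cpu counters of the CPUs belonging to node 'nid'. */
	get_page_state_node(&ps, nid);

	return scnprintf(buf, len,
			 "Node %d Dirty:     %8lu pages\n"
			 "Node %d Writeback: %8lu pages\n"
			 "Node %d Mapped:    %8lu pages\n",
			 nid, ps.nr_dirty,
			 nid, ps.nr_writeback,
			 nid, ps.nr_mapped);
}

Wired into the node sysdev attributes, output of this shape would appear
alongside the existing fields in /sys/devices/system/node/node<N>/meminfo.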
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d157dae8c9f3..b06a9636d971 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1130,19 +1130,20 @@ EXPORT_SYMBOL(nr_pagecache);
 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
 #endif
 
-void __get_page_state(struct page_state *ret, int nr)
+void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
 	memset(ret, 0, sizeof(*ret));
+	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
-	cpu = first_cpu(cpu_online_map);
+	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
-		cpu = next_cpu(cpu, cpu_online_map);
+		cpu = next_cpu(cpu, *cpumask);
 
 		if (cpu < NR_CPUS)
 			prefetch(&per_cpu(page_states, cpu));
@@ -1153,19 +1154,33 @@ void __get_page_state(struct page_state *ret, int nr)
 	}
 }
 
+void get_page_state_node(struct page_state *ret, int node)
+{
+	int nr;
+	cpumask_t mask = node_to_cpumask(node);
+
+	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+	nr /= sizeof(unsigned long);
+
+	__get_page_state(ret, nr+1, &mask);
+}
+
 void get_page_state(struct page_state *ret)
 {
 	int nr;
+	cpumask_t mask = CPU_MASK_ALL;
 
 	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
 	nr /= sizeof(unsigned long);
 
-	__get_page_state(ret, nr + 1);
+	__get_page_state(ret, nr + 1, &mask);
 }
 
 void get_full_page_state(struct page_state *ret)
 {
-	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+	cpumask_t mask = CPU_MASK_ALL;
+
+	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
 }
 
 unsigned long __read_page_state(unsigned long offset)