Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--	mm/vmstat.c	70
1 file changed, 20 insertions(+), 50 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index dc005a0c96ae..6c488d6ac425 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -13,39 +13,6 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free, struct pglist_data *pgdat)
-{
-	struct zone *zones = pgdat->node_zones;
-	int i;
-
-	*active = 0;
-	*inactive = 0;
-	*free = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		*active += zones[i].nr_active;
-		*inactive += zones[i].nr_inactive;
-		*free += zones[i].free_pages;
-	}
-}
-
-void get_zone_counts(unsigned long *active,
-		unsigned long *inactive, unsigned long *free)
-{
-	struct pglist_data *pgdat;
-
-	*active = 0;
-	*inactive = 0;
-	*free = 0;
-	for_each_online_pgdat(pgdat) {
-		unsigned long l, m, n;
-		__get_zone_counts(&l, &m, &n, pgdat);
-		*active += l;
-		*inactive += m;
-		*free += n;
-	}
-}
-
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
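
Note (not part of the patch): the removed helpers walked every pgdat and summed the per-zone nr_active, nr_inactive and free_pages fields. With the zoned VM counters this change relies on, the same totals can be read directly. A minimal sketch, assuming the NR_FREE_PAGES, NR_ACTIVE and NR_INACTIVE zone_stat_item entries added by this series and the existing global_page_state() accessor:

	static void get_global_counts(unsigned long *active,
				      unsigned long *inactive,
				      unsigned long *free)
	{
		/* Reads the global ZVC counters that the per-cpu diffs are folded into. */
		*active   = global_page_state(NR_ACTIVE);
		*inactive = global_page_state(NR_INACTIVE);
		*free     = global_page_state(NR_FREE_PAGES);
	}
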
@@ -239,7 +206,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
  * in between and therefore the atomicity vs. interrupt cannot be exploited
  * in a useful way here.
  */
-static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
 	s8 *p = pcp->vm_stat_diff + item;
@@ -260,9 +227,8 @@ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 }
 EXPORT_SYMBOL(__inc_zone_page_state);
 
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct zone *zone = page_zone(page);
 	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
 	s8 *p = pcp->vm_stat_diff + item;
 
@@ -275,6 +241,11 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 		*p = overstep;
 	}
 }
+
+void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	__dec_zone_state(page_zone(page), item);
+}
 EXPORT_SYMBOL(__dec_zone_page_state);
 
 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
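
A hedged usage sketch, not from this patch: __inc_zone_state() and __dec_zone_state() update a per-cpu differential, so a caller must not be preemptible, typically because interrupts are already disabled. Assuming the NR_ACTIVE zone_stat_item introduced by this series, a caller could look like:

	static void example_account_deactivation(struct page *page)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* Safe: the per-cpu vm_stat_diff is only touched with irqs off here. */
		__dec_zone_state(page_zone(page), NR_ACTIVE);
		local_irq_restore(flags);
	}
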
@@ -437,6 +408,12 @@ const struct seq_operations fragmentation_op = {
 	.show	= frag_show,
 };
 
+#ifdef CONFIG_ZONE_DMA
+#define TEXT_FOR_DMA(xx) xx "_dma",
+#else
+#define TEXT_FOR_DMA(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define TEXT_FOR_DMA32(xx) xx "_dma32",
 #else
@@ -449,19 +426,22 @@ const struct seq_operations fragmentation_op = {
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
-#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx)
 
 static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_free_pages",
+	"nr_active",
+	"nr_inactive",
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_dirty",
+	"nr_writeback",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",
 	"nr_page_table_pages",
-	"nr_dirty",
-	"nr_writeback",
 	"nr_unstable",
 	"nr_bounce",
 	"nr_vmscan_write",
@@ -529,17 +509,13 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
529 "\n min %lu" 509 "\n min %lu"
530 "\n low %lu" 510 "\n low %lu"
531 "\n high %lu" 511 "\n high %lu"
532 "\n active %lu"
533 "\n inactive %lu"
534 "\n scanned %lu (a: %lu i: %lu)" 512 "\n scanned %lu (a: %lu i: %lu)"
535 "\n spanned %lu" 513 "\n spanned %lu"
536 "\n present %lu", 514 "\n present %lu",
537 zone->free_pages, 515 zone_page_state(zone, NR_FREE_PAGES),
538 zone->pages_min, 516 zone->pages_min,
539 zone->pages_low, 517 zone->pages_low,
540 zone->pages_high, 518 zone->pages_high,
541 zone->nr_active,
542 zone->nr_inactive,
543 zone->pages_scanned, 519 zone->pages_scanned,
544 zone->nr_scan_active, zone->nr_scan_inactive, 520 zone->nr_scan_active, zone->nr_scan_inactive,
545 zone->spanned_pages, 521 zone->spanned_pages,
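
The zone_page_state() accessor used above is defined outside this file; a minimal sketch of the behaviour assumed here (a per-zone atomic counter, clamped so that transient per-cpu drift is never reported as a negative count):

	static inline unsigned long zone_page_state(struct zone *zone,
						    enum zone_stat_item item)
	{
		long x = atomic_long_read(&zone->vm_stat[item]);

	#ifdef CONFIG_SMP
		/* Unfolded per-cpu diffs can leave the sum briefly negative. */
		if (x < 0)
			x = 0;
	#endif
		return x;
	}
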
@@ -563,12 +539,6 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
 
 		pageset = zone_pcp(zone, i);
 		for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
-			if (pageset->pcp[j].count)
-				break;
-		}
-		if (j == ARRAY_SIZE(pageset->pcp))
-			continue;
-		for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
 			seq_printf(m,
 				   "\n    cpu: %i pcp: %i"
 				   "\n              count: %i"