path: root/mm
author		Christoph Lameter <clameter@sgi.com>	2006-06-30 04:55:45 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-30 14:25:36 -0400
commit		f8891e5e1f93a128c3900f82035e8541357896a7 (patch)
tree		97b078ac97970962b17c85d39fd64cb48dc01168 /mm
parent		ca889e6c45e0b112cb2ca9d35afc66297519b5d5 (diff)
[PATCH] Light weight event counters
The remaining counters in page_state after the zoned VM counter patches have
been applied are all just for show in /proc/vmstat.  They have no essential
function for the VM.

We use a simple increment of per cpu variables.  In order to avoid the most
severe races we disable preempt.  Preempt does not prevent the race between an
increment and an interrupt handler incrementing the same statistics counter.
However, that race is exceedingly rare, we may only lose one increment or so
and there is no requirement (at least not in the kernel) that the vm event
counters have to be accurate.

In the non preempt case this results in a simple increment for each counter.
For many architectures this will be reduced by the compiler to a single
instruction.  This single instruction is atomic for i386 and x86_64, and
therefore even the rare race condition in an interrupt is avoided for both
architectures in most cases.

The patchset also adds an off switch for embedded systems that allows building
Linux kernels without these counters.

The implementation of these counters is through inline code that hopefully
results in only a single increment instruction being emitted (i386, x86_64) or
in the increment being hidden through instruction concurrency (EPIC
architectures such as ia64 can get that done).

Benefits:
- VM event counter operations usually reduce to a single inline instruction
  on i386 and x86_64.
- No interrupt disable, only preempt disable for the preempt case.
  Preempt disable can also be avoided by moving the counter into a spinlock.
- Handling is similar to zoned VM counters.
- Simple and easily extendable.
- Can be omitted to reduce memory use for embedded use.

References:
RFC      http://marc.theaimsgroup.com/?l=linux-kernel&m=113512330605497&w=2
RFC      http://marc.theaimsgroup.com/?l=linux-kernel&m=114988082814934&w=2
local_t  http://marc.theaimsgroup.com/?l=linux-kernel&m=114991748606690&w=2
V2       http://marc.theaimsgroup.com/?t=115014808400007&r=1&w=2
V3       http://marc.theaimsgroup.com/?l=linux-kernel&m=115024767022346&w=2
V4       http://marc.theaimsgroup.com/?l=linux-kernel&m=115047968808926&w=2

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
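Editorial note: the counting primitives referenced by the diff below (count_vm_event() and friends) are defined in include/linux/vmstat.h, which is outside this mm/-limited diffstat.  The following is only a rough sketch, under that caveat, of the preempt-safe per-CPU increment the message describes; it is not the exact header contents.

/*
 * Sketch of the lightweight event counters: one array of counters per CPU,
 * bumped with a plain increment.  Approximation of the vmstat.h additions,
 * not part of this mm/ diff.
 */
struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller has already disabled preemption (or interrupts). */
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

/* get_cpu_var() disables preemption around the single increment. */
static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu_var(vm_event_states);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu_var(vm_event_states);
}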
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	4
-rw-r--r--	mm/memory.c	4
-rw-r--r--	mm/page_alloc.c	21
-rw-r--r--	mm/page_io.c	4
-rw-r--r--	mm/shmem.c	4
-rw-r--r--	mm/swap.c	4
-rw-r--r--	mm/vmscan.c	23
-rw-r--r--	mm/vmstat.c	171
8 files changed, 114 insertions, 121 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 87d62c44c3f0..796a5471b495 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1416,7 +1416,7 @@ retry_find:
 		 */
 		if (!did_readaround) {
 			majmin = VM_FAULT_MAJOR;
-			inc_page_state(pgmajfault);
+			count_vm_event(PGMAJFAULT);
 		}
 		did_readaround = 1;
 		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
@@ -1487,7 +1487,7 @@ no_cached_page:
 page_not_uptodate:
 	if (!did_readaround) {
 		majmin = VM_FAULT_MAJOR;
-		inc_page_state(pgmajfault);
+		count_vm_event(PGMAJFAULT);
 	}
 	lock_page(page);
 
diff --git a/mm/memory.c b/mm/memory.c
index 1a78791590fa..7e2a4b1580e3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1951,7 +1951,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
-		inc_page_state(pgmajfault);
+		count_vm_event(PGMAJFAULT);
 		grab_swap_token();
 	}
 
@@ -2324,7 +2324,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	__set_current_state(TASK_RUNNING);
 
-	inc_page_state(pgfault);
+	count_vm_event(PGFAULT);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, write_access);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d61671260f92..30b0b97ad023 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -456,7 +456,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
 	kernel_map_pages(page, 1 << order, 0);
 	local_irq_save(flags);
-	__mod_page_state(pgfree, 1 << order);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order);
 	local_irq_restore(flags);
 }
@@ -729,7 +729,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
-	__inc_page_state(pgfree);
+	__count_vm_event(PGFREE);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
@@ -805,7 +805,7 @@ again:
 		goto failed;
 	}
 
-	__mod_page_state_zone(zone, pgalloc, 1 << order);
+	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(zonelist, zone);
 	local_irq_restore(flags);
 	put_cpu();
@@ -2101,24 +2101,11 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
-	unsigned long *src, *dest;
 
 	if (action == CPU_DEAD) {
-		int i;
-
 		local_irq_disable();
 		__drain_pages(cpu);
-
-		/* Add dead cpu's page_states to our own. */
-		dest = (unsigned long *)&__get_cpu_var(page_states);
-		src = (unsigned long *)&per_cpu(page_states, cpu);
-
-		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
-				i++) {
-			dest[i] += src[i];
-			src[i] = 0;
-		}
-
+		vm_events_fold_cpu(cpu);
 		local_irq_enable();
 		refresh_cpu_vm_stats(cpu);
 	}
diff --git a/mm/page_io.c b/mm/page_io.c
index bb2b0d53889c..88029948d00a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -101,7 +101,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		rw |= (1 << BIO_RW_SYNC);
-	inc_page_state(pswpout);
+	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
 	submit_bio(rw, bio);
@@ -123,7 +123,7 @@ int swap_readpage(struct file *file, struct page *page)
 		ret = -ENOMEM;
 		goto out;
 	}
-	inc_page_state(pswpin);
+	count_vm_event(PSWPIN);
 	submit_bio(READ, bio);
 out:
 	return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index b14ff817d162..a9c09e0ba709 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1045,12 +1045,12 @@ repeat:
 		swappage = lookup_swap_cache(swap);
 		if (!swappage) {
 			shmem_swp_unmap(entry);
-			spin_unlock(&info->lock);
 			/* here we actually do the io */
 			if (type && *type == VM_FAULT_MINOR) {
-				inc_page_state(pgmajfault);
+				__count_vm_event(PGMAJFAULT);
 				*type = VM_FAULT_MAJOR;
 			}
+			spin_unlock(&info->lock);
 			swappage = shmem_swapin(info, swap, idx);
 			if (!swappage) {
 				spin_lock(&info->lock);
diff --git a/mm/swap.c b/mm/swap.c
index 990868afc1c6..8fd095c4ae51 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -87,7 +87,7 @@ int rotate_reclaimable_page(struct page *page)
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
 		list_move_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		__count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
@@ -107,7 +107,7 @@ void fastcall activate_page(struct page *page)
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		__count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d6942436ac97..ff2ebe9458a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -215,7 +215,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			break;
 		if (shrink_ret < nr_before)
 			ret += nr_before - shrink_ret;
-		mod_page_state(slabs_scanned, this_scan);
+		count_vm_events(SLABS_SCANNED, this_scan);
 		total_scan -= this_scan;
 
 		cond_resched();
@@ -569,7 +569,7 @@ keep:
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
 		__pagevec_release_nonlru(&freed_pvec);
-	mod_page_state(pgactivate, pgactivate);
+	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
 
@@ -659,11 +659,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		nr_reclaimed += nr_freed;
 		local_irq_disable();
 		if (current_is_kswapd()) {
-			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-			__mod_page_state(kswapd_steal, nr_freed);
+			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+			__count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
-			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
-		__mod_page_state_zone(zone, pgsteal, nr_freed);
+			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+		__count_vm_events(PGACTIVATE, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -841,11 +841,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 	}
 	zone->nr_active += pgmoved;
-	spin_unlock(&zone->lru_lock);
 
-	__mod_page_state_zone(zone, pgrefill, pgscanned);
-	__mod_page_state(pgdeactivate, pgdeactivate);
-	local_irq_enable();
+	__count_zone_vm_events(PGREFILL, zone, pgscanned);
+	__count_vm_events(PGDEACTIVATE, pgdeactivate);
+	spin_unlock_irq(&zone->lru_lock);
 
 	pagevec_release(&pvec);
 }
@@ -977,7 +976,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 		.swappiness = vm_swappiness,
 	};
 
-	inc_page_state(allocstall);
+	count_vm_event(ALLOCSTALL);
 
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
@@ -1074,7 +1073,7 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	inc_page_state(pageoutrun);
+	count_vm_event(PAGEOUTRUN);
 
 	for (i = 0; i < pgdat->nr_zones; i++) {
 		struct zone *zone = pgdat->node_zones + i;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ee7f89666250..73b83d67bab6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -13,66 +13,6 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
-/*
- * Accumulate the page_state information across all CPUs.
- * The result is unavoidably approximate - it can change
- * during and after execution of this function.
- */
-DEFINE_PER_CPU(struct page_state, page_states) = {0};
-
-static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
-{
-	unsigned cpu;
-
-	memset(ret, 0, nr * sizeof(unsigned long));
-	cpus_and(*cpumask, *cpumask, cpu_online_map);
-
-	for_each_cpu_mask(cpu, *cpumask) {
-		unsigned long *in;
-		unsigned long *out;
-		unsigned off;
-		unsigned next_cpu;
-
-		in = (unsigned long *)&per_cpu(page_states, cpu);
-
-		next_cpu = next_cpu(cpu, *cpumask);
-		if (likely(next_cpu < NR_CPUS))
-			prefetch(&per_cpu(page_states, next_cpu));
-
-		out = (unsigned long *)ret;
-		for (off = 0; off < nr; off++)
-			*out++ += *in++;
-	}
-}
-
-void get_full_page_state(struct page_state *ret)
-{
-	cpumask_t mask = CPU_MASK_ALL;
-
-	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
-}
-
-void __mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
-	void *ptr;
-
-	ptr = &__get_cpu_var(page_states);
-	*(unsigned long *)(ptr + offset) += delta;
-}
-EXPORT_SYMBOL(__mod_page_state_offset);
-
-void mod_page_state_offset(unsigned long offset, unsigned long delta)
-{
-	unsigned long flags;
-	void *ptr;
-
-	local_irq_save(flags);
-	ptr = &__get_cpu_var(page_states);
-	*(unsigned long *)(ptr + offset) += delta;
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_page_state_offset);
-
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 		unsigned long *free, struct pglist_data *pgdat)
 {
@@ -106,6 +46,63 @@ void get_zone_counts(unsigned long *active,
 	}
 }
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
+DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
+EXPORT_PER_CPU_SYMBOL(vm_event_states);
+
+static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+{
+	int cpu = 0;
+	int i;
+
+	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
+
+	cpu = first_cpu(*cpumask);
+	while (cpu < NR_CPUS) {
+		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
+
+		cpu = next_cpu(cpu, *cpumask);
+
+		if (cpu < NR_CPUS)
+			prefetch(&per_cpu(vm_event_states, cpu));
+
+
+		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+			ret[i] += this->event[i];
+	}
+}
+
+/*
+ * Accumulate the vm event counters across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+*/
+void all_vm_events(unsigned long *ret)
+{
+	sum_vm_events(ret, &cpu_online_map);
+}
+
+#ifdef CONFIG_HOTPLUG
+/*
+ * Fold the foreign cpu events into our own.
+ *
+ * This is adding to the events on one processor
+ * but keeps the global counts constant.
+ */
+void vm_events_fold_cpu(int cpu)
+{
+	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
+	int i;
+
+	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
+		count_vm_events(i, fold_state->event[i]);
+		fold_state->event[i] = 0;
+	}
+}
+#endif /* CONFIG_HOTPLUG */
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
 /*
  * Manage combined zone based / global counters
  *
@@ -405,16 +402,16 @@ static char *vmstat_text[] = {
 	"numa_other",
 #endif
 
-	/* Event counters */
+#ifdef CONFIG_VM_EVENT_COUNTERS
 	"pgpgin",
 	"pgpgout",
 	"pswpin",
 	"pswpout",
 
-	"pgalloc_high",
-	"pgalloc_normal",
-	"pgalloc_dma32",
 	"pgalloc_dma",
+	"pgalloc_dma32",
+	"pgalloc_normal",
+	"pgalloc_high",
 
 	"pgfree",
 	"pgactivate",
@@ -423,25 +420,25 @@ static char *vmstat_text[] = {
 	"pgfault",
 	"pgmajfault",
 
-	"pgrefill_high",
-	"pgrefill_normal",
-	"pgrefill_dma32",
 	"pgrefill_dma",
+	"pgrefill_dma32",
+	"pgrefill_normal",
+	"pgrefill_high",
 
-	"pgsteal_high",
-	"pgsteal_normal",
-	"pgsteal_dma32",
 	"pgsteal_dma",
+	"pgsteal_dma32",
+	"pgsteal_normal",
+	"pgsteal_high",
 
-	"pgscan_kswapd_high",
-	"pgscan_kswapd_normal",
-	"pgscan_kswapd_dma32",
 	"pgscan_kswapd_dma",
+	"pgscan_kswapd_dma32",
+	"pgscan_kswapd_normal",
+	"pgscan_kswapd_high",
 
-	"pgscan_direct_high",
-	"pgscan_direct_normal",
-	"pgscan_direct_dma32",
 	"pgscan_direct_dma",
+	"pgscan_direct_dma32",
+	"pgscan_direct_normal",
+	"pgscan_direct_high",
 
 	"pginodesteal",
 	"slabs_scanned",
@@ -451,6 +448,7 @@ static char *vmstat_text[] = {
 	"allocstall",
 
 	"pgrotated",
+#endif
 };
 
 /*
@@ -553,23 +551,32 @@ struct seq_operations zoneinfo_op = {
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *v;
-	struct page_state *ps;
+#ifdef CONFIG_VM_EVENT_COUNTERS
+	unsigned long *e;
+#endif
 	int i;
 
 	if (*pos >= ARRAY_SIZE(vmstat_text))
 		return NULL;
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
 	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
-			+ sizeof(*ps), GFP_KERNEL);
+			+ sizeof(struct vm_event_state), GFP_KERNEL);
+#else
+	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
+			GFP_KERNEL);
+#endif
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		v[i] = global_page_state(i);
-	ps = (struct page_state *)(v + NR_VM_ZONE_STAT_ITEMS);
-	get_full_page_state(ps);
-	ps->pgpgin /= 2;		/* sectors -> kbytes */
-	ps->pgpgout /= 2;
+#ifdef CONFIG_VM_EVENT_COUNTERS
+	e = v + NR_VM_ZONE_STAT_ITEMS;
+	all_vm_events(e);
+	e[PGPGIN] /= 2;		/* sectors -> kbytes */
+	e[PGPGOUT] /= 2;
+#endif
 	return v + *pos;
 }
 