Diffstat (limited to 'include/linux/vmstat.h')
 include/linux/vmstat.h | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ee03bba9c5df..eaaea37b3b75 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -43,6 +43,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
@@ -78,22 +82,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	__this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	__this_cpu_add(vm_event_states.event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	this_cpu_add(vm_event_states.event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
@@ -166,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
 	return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+					enum zone_stat_item item)
+{
+	long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+	int cpu;
+	for_each_online_cpu(cpu)
+		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
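For context, a minimal sketch of how a caller might use the helper added above. zone_page_state() reads only the zone-wide atomic counter, while zone_page_state_snapshot() also folds in the per-cpu vm_stat_diff deltas, so the snapshot is the better choice when a decision is made close to a watermark and the pending deltas could change the outcome. The function zone_free_pages_estimate() and the low_wmark_pages() threshold below are illustrative assumptions, not part of this patch.

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Hypothetical caller (not part of this patch): use the cheap
 * zone_page_state() read first, and only pay for the slower
 * zone_page_state_snapshot() -- which sums vm_stat_diff[] across
 * all online cpus -- when the zone looks close to its low
 * watermark and the pending per-cpu deltas could matter.
 */
static unsigned long zone_free_pages_estimate(struct zone *zone)
{
	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	if (free < low_wmark_pages(zone))
		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);

	return free;
}

Because the snapshot loops over every online cpu it is noticeably more expensive than a plain counter read, which is why a caller would reserve it for the near-watermark slow path.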
