Diffstat (limited to 'include')
-rw-r--r--   include/asm-s390/pgtable.h     7
-rw-r--r--   include/linux/mm.h             6
-rw-r--r--   include/linux/page-flags.h   141
-rw-r--r--   include/linux/vmstat.h       138
4 files changed, 151 insertions, 141 deletions
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 859b5e969826..24312387fa24 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -657,13 +657,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
         __pte; \
 })
 
-#define SetPageUptodate(_page) \
-        do { \
-                struct page *__page = (_page); \
-                if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
-                        page_test_and_clear_dirty(_page); \
-        } while (0)
-
 #ifdef __s390x__
 
 #define pfn_pmd(pfn, pgprot) \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8cf..75179529e399 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
         set_page_section(page, pfn_to_section_nr(pfn));
 }
 
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
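Aside (not part of the patch): the comment added above about vmstat.h depending on page_zone() can be illustrated with a hypothetical inline helper. The function name below is invented for illustration; the vmstat.h introduced by this commit only exposes macro equivalents.

    /* hypothetical inline that vmstat.h could provide; it needs page_zone()
     * and the zone classification helpers to already be visible, which is
     * why mm.h includes <linux/vmstat.h> only after defining page_zone(). */
    static inline void example_count_zone_refill(struct page *page)
    {
            mod_page_state_zone(page_zone(page), pgrefill, 1UL);
    }

A plain #include at the top of mm.h would be parsed before page_zone() exists, so the include sits in the middle of the header instead.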
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c676..ff235c4b79ea 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
 #ifndef PAGE_FLAGS_H
 #define PAGE_FLAGS_H
 
-#include <linux/percpu.h>
-#include <linux/cache.h>
 #include <linux/types.h>
 
-#include <asm/pgtable.h>
-
 /*
  * Various page->flags bits:
  *
@@ -103,134 +99,6 @@
 #endif
 
 /*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
- *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context. In this case, the field should be
- *   commented here.
- */
-struct page_state {
-        unsigned long nr_dirty;      /* Dirty writeable pages */
-        unsigned long nr_writeback;  /* Pages under writeback */
-        unsigned long nr_unstable;   /* NFS unstable pages */
-        unsigned long nr_page_table_pages; /* Pages used for pagetables */
-        unsigned long nr_mapped;     /* mapped into pagetables.
-                                      * only modified from process context */
-        unsigned long nr_slab;       /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
-        /*
-         * The below are zeroed by get_page_state(). Use get_full_page_state()
-         * to add up all these.
-         */
-        unsigned long pgpgin;        /* Disk reads */
-        unsigned long pgpgout;       /* Disk writes */
-        unsigned long pswpin;        /* swap reads */
-        unsigned long pswpout;       /* swap writes */
-
-        unsigned long pgalloc_high;  /* page allocations */
-        unsigned long pgalloc_normal;
-        unsigned long pgalloc_dma32;
-        unsigned long pgalloc_dma;
-
-        unsigned long pgfree;        /* page freeings */
-        unsigned long pgactivate;    /* pages moved inactive->active */
-        unsigned long pgdeactivate;  /* pages moved active->inactive */
-
-        unsigned long pgfault;       /* faults (major+minor) */
-        unsigned long pgmajfault;    /* faults (major only) */
-
-        unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
-        unsigned long pgrefill_normal;
-        unsigned long pgrefill_dma32;
-        unsigned long pgrefill_dma;
-
-        unsigned long pgsteal_high;  /* total highmem pages reclaimed */
-        unsigned long pgsteal_normal;
-        unsigned long pgsteal_dma32;
-        unsigned long pgsteal_dma;
-
-        unsigned long pgscan_kswapd_high; /* total highmem pages scanned */
-        unsigned long pgscan_kswapd_normal;
-        unsigned long pgscan_kswapd_dma32;
-        unsigned long pgscan_kswapd_dma;
-
-        unsigned long pgscan_direct_high; /* total highmem pages scanned */
-        unsigned long pgscan_direct_normal;
-        unsigned long pgscan_direct_dma32;
-        unsigned long pgscan_direct_dma;
-
-        unsigned long pginodesteal;  /* pages reclaimed via inode freeing */
-        unsigned long slabs_scanned; /* slab objects scanned */
-        unsigned long kswapd_steal;  /* pages reclaimed by kswapd */
-        unsigned long kswapd_inodesteal; /* reclaimed via kswapd inode freeing */
-        unsigned long pageoutrun;    /* kswapd's calls to page reclaim */
-        unsigned long allocstall;    /* direct reclaim calls */
-
-        unsigned long pgrotated;     /* pages rotated to tail of the LRU */
-        unsigned long nr_bounce;     /* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-        read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta) \
-        mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta) \
-        __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)          mod_page_state(member, 1UL)
-#define dec_page_state(member)          mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta)    mod_page_state(member, (delta))
-#define sub_page_state(member,delta)    mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member)        __mod_page_state(member, 1UL)
-#define __dec_page_state(member)        __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta)  __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta)  __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member) \
-({ \
-        unsigned offset; \
-        if (is_highmem(zone)) \
-                offset = offsetof(struct page_state, member##_high); \
-        else if (is_normal(zone)) \
-                offset = offsetof(struct page_state, member##_normal); \
-        else if (is_dma32(zone)) \
-                offset = offsetof(struct page_state, member##_dma32); \
-        else \
-                offset = offsetof(struct page_state, member##_dma); \
-        offset; \
-})
-
-#define __mod_page_state_zone(zone, member, delta) \
-do { \
-        __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-} while (0)
-
-#define mod_page_state_zone(zone, member, delta) \
-do { \
-        mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-} while (0)
-
-/*
  * Manipulation of page state flags
  */
 #define PageLocked(page) \
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
 
 #define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
+#ifdef CONFIG_S390
+#define SetPageUptodate(_page) \
+        do { \
+                struct page *__page = (_page); \
+                if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
+                        page_test_and_clear_dirty(_page); \
+        } while (0)
+#else
 #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
 #endif
 #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
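For context, a rough sketch of how the PG_uptodate helpers above are used by callers (illustrative only; the completion handler below is invented, not code from this patch):

    /* illustrative read-completion path: mark the page up to date only if
     * the I/O succeeded, so stale contents are never treated as valid */
    static void example_read_end_io(struct page *page, int uptodate)
    {
            if (uptodate)
                    SetPageUptodate(page);
            else
                    ClearPageUptodate(page);
            unlock_page(page);
    }

The CONFIG_S390 variant of SetPageUptodate() additionally calls page_test_and_clear_dirty(), because s390 tracks page dirtiness in the hardware storage key rather than only in page->flags.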
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000000..3ca0c1989fc2
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,138 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+/*
+ * Global page accounting. One instance per CPU. Only unsigned longs are
+ * allowed.
+ *
+ * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
+ *   any time safely (which protects the instance from modification by
+ *   interrupt.
+ * - The __xxx_page_state variants can be used safely when interrupts are
+ *   disabled.
+ * - The __xxx_page_state variants can be used if the field is only
+ *   modified from process context and protected from preemption, or only
+ *   modified from interrupt context. In this case, the field should be
+ *   commented here.
+ */
+struct page_state {
+        unsigned long nr_dirty;      /* Dirty writeable pages */
+        unsigned long nr_writeback;  /* Pages under writeback */
+        unsigned long nr_unstable;   /* NFS unstable pages */
+        unsigned long nr_page_table_pages; /* Pages used for pagetables */
+        unsigned long nr_mapped;     /* mapped into pagetables.
+                                      * only modified from process context */
+        unsigned long nr_slab;       /* In slab */
+#define GET_PAGE_STATE_LAST nr_slab
+
+        /*
+         * The below are zeroed by get_page_state(). Use get_full_page_state()
+         * to add up all these.
+         */
+        unsigned long pgpgin;        /* Disk reads */
+        unsigned long pgpgout;       /* Disk writes */
+        unsigned long pswpin;        /* swap reads */
+        unsigned long pswpout;       /* swap writes */
+
+        unsigned long pgalloc_high;  /* page allocations */
+        unsigned long pgalloc_normal;
+        unsigned long pgalloc_dma32;
+        unsigned long pgalloc_dma;
+
+        unsigned long pgfree;        /* page freeings */
+        unsigned long pgactivate;    /* pages moved inactive->active */
+        unsigned long pgdeactivate;  /* pages moved active->inactive */
+
+        unsigned long pgfault;       /* faults (major+minor) */
+        unsigned long pgmajfault;    /* faults (major only) */
+
+        unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
+        unsigned long pgrefill_normal;
+        unsigned long pgrefill_dma32;
+        unsigned long pgrefill_dma;
+
+        unsigned long pgsteal_high;  /* total highmem pages reclaimed */
+        unsigned long pgsteal_normal;
+        unsigned long pgsteal_dma32;
+        unsigned long pgsteal_dma;
+
+        unsigned long pgscan_kswapd_high; /* total highmem pages scanned */
+        unsigned long pgscan_kswapd_normal;
+        unsigned long pgscan_kswapd_dma32;
+        unsigned long pgscan_kswapd_dma;
+
+        unsigned long pgscan_direct_high; /* total highmem pages scanned */
+        unsigned long pgscan_direct_normal;
+        unsigned long pgscan_direct_dma32;
+        unsigned long pgscan_direct_dma;
+
+        unsigned long pginodesteal;  /* pages reclaimed via inode freeing */
+        unsigned long slabs_scanned; /* slab objects scanned */
+        unsigned long kswapd_steal;  /* pages reclaimed by kswapd */
+        unsigned long kswapd_inodesteal; /* reclaimed via kswapd inode freeing */
+        unsigned long pageoutrun;    /* kswapd's calls to page reclaim */
+        unsigned long allocstall;    /* direct reclaim calls */
+
+        unsigned long pgrotated;     /* pages rotated to tail of the LRU */
+        unsigned long nr_bounce;     /* pages for bounce buffers */
+};
+
+extern void get_page_state(struct page_state *ret);
+extern void get_page_state_node(struct page_state *ret, int node);
+extern void get_full_page_state(struct page_state *ret);
+extern unsigned long read_page_state_offset(unsigned long offset);
+extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
+extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
+
+#define read_page_state(member) \
+        read_page_state_offset(offsetof(struct page_state, member))
+
+#define mod_page_state(member, delta) \
+        mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define __mod_page_state(member, delta) \
+        __mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member)          mod_page_state(member, 1UL)
+#define dec_page_state(member)          mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta)    mod_page_state(member, (delta))
+#define sub_page_state(member,delta)    mod_page_state(member, 0UL - (delta))
+
+#define __inc_page_state(member)        __mod_page_state(member, 1UL)
+#define __dec_page_state(member)        __mod_page_state(member, 0UL - 1)
+#define __add_page_state(member,delta)  __mod_page_state(member, (delta))
+#define __sub_page_state(member,delta)  __mod_page_state(member, 0UL - (delta))
+
+#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
+
+#define state_zone_offset(zone, member) \
+({ \
+        unsigned offset; \
+        if (is_highmem(zone)) \
+                offset = offsetof(struct page_state, member##_high); \
+        else if (is_normal(zone)) \
+                offset = offsetof(struct page_state, member##_normal); \
+        else if (is_dma32(zone)) \
+                offset = offsetof(struct page_state, member##_dma32); \
+        else \
+                offset = offsetof(struct page_state, member##_dma); \
+        offset; \
+})
+
+#define __mod_page_state_zone(zone, member, delta) \
+do { \
+        __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+} while (0)
+
+#define mod_page_state_zone(zone, member, delta) \
+do { \
+        mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+} while (0)
+
+DECLARE_PER_CPU(struct page_state, page_states);
+
+#endif /* _LINUX_VMSTAT_H */
+
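A brief usage sketch of the accounting interface declared above (illustrative only; the wrapper function and its zone argument are assumptions, not code from this patch):

    /* assumed context: zone was obtained via page_zone(page) or similar */
    static void example_page_accounting(struct zone *zone)
    {
            inc_page_state(pgmajfault);     /* irq-safe; expands to mod_page_state_offset(offsetof(struct page_state, pgmajfault), 1UL) */
            add_page_state(pgpgin, 8);      /* bump a counter by an arbitrary delta */
            __inc_page_state(pgrotated);    /* caller already has interrupts disabled, or the counter is single-context */
            mod_page_state_zone(zone, pgrefill, 1UL);   /* selects pgrefill_high/_normal/_dma32/_dma via state_zone_offset() */
    }

All of these resolve to offsets into the per-CPU struct page_state instances declared by DECLARE_PER_CPU(struct page_state, page_states).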