author     Wu Fengguang <fengguang.wu@intel.com>             2009-06-16 18:32:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 22:47:38 -0400
commit     177975495914efb372f7edee28ba9a0fdb754149
tree       d26a09871ee7b250a5c96e323cc4d5245e35eeff /fs/proc/page.c
parent     ed7ce0f1022942301776f93159c981b09382ddea
proc: export more page flags in /proc/kpageflags
Export all page flags faithfully in /proc/kpageflags.
11. KPF_MMAP (pseudo flag) memory mapped page
12. KPF_ANON (pseudo flag) memory mapped page (anonymous)
13. KPF_SWAPCACHE page is in swap cache
14. KPF_SWAPBACKED page is swap/RAM backed
15. KPF_COMPOUND_HEAD (*)
16. KPF_COMPOUND_TAIL (*)
17. KPF_HUGE hugeTLB pages
18. KPF_UNEVICTABLE page is in the unevictable LRU list
19. KPF_HWPOISON (TBD) hardware-detected corruption
20. KPF_NOPAGE (pseudo flag) no page frame at the address
32-39. more obscure flags for kernel developers
(*) For compound pages, exporting _both_ head/tail info enables
users to tell where a compound page starts/ends, and its order.
The accompanying page-types tool will handle the details, like decoupling
overloaded flags and hiding obscure flags from normal users.
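For reference, here is a minimal userspace sketch (not part of this patch) of how the exported format can be consumed: each pfn maps to one u64 in /proc/kpageflags at offset pfn * sizeof(u64), and the bit positions are the KPF_* numbers listed above. The file name and bit numbers come from the patch; the program structure and error handling are illustrative only.

/* Illustrative example, not part of the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define KPF_MMAP          11
#define KPF_ANON          12
#define KPF_COMPOUND_HEAD 15
#define KPF_NOPAGE        20

int main(int argc, char **argv)
{
	uint64_t flags;
	unsigned long pfn;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}
	pfn = strtoul(argv[1], NULL, 0);

	/* One u64 of flags per pfn, indexed by pfn * sizeof(u64). */
	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0 ||
	    pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
		perror("/proc/kpageflags");
		return 1;
	}

	/* Test a few of the newly exported bits. */
	printf("pfn %lu: mmap=%d anon=%d compound_head=%d nopage=%d\n", pfn,
	       !!(flags & (1ULL << KPF_MMAP)),
	       !!(flags & (1ULL << KPF_ANON)),
	       !!(flags & (1ULL << KPF_COMPOUND_HEAD)),
	       !!(flags & (1ULL << KPF_NOPAGE)));
	close(fd);
	return 0;
}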
Thanks to KOSAKI and Andi for their valuable recommendations!
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc/page.c')
-rw-r--r--  fs/proc/page.c  152
1 file changed, 120 insertions(+), 32 deletions(-)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index e73e911b7d0c..9d926bd279a4 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -72,19 +72,124 @@ static const struct file_operations proc_kpagecount_operations = {
 
 /* These macros are used to decouple internal flags from exported ones */
 
 #define KPF_LOCKED     0
 #define KPF_ERROR      1
 #define KPF_REFERENCED 2
 #define KPF_UPTODATE   3
 #define KPF_DIRTY      4
 #define KPF_LRU        5
 #define KPF_ACTIVE     6
 #define KPF_SLAB       7
 #define KPF_WRITEBACK  8
 #define KPF_RECLAIM    9
 #define KPF_BUDDY     10
 
-#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+/* 11-20: new additions in 2.6.31 */
+#define KPF_MMAP		11
+#define KPF_ANON		12
+#define KPF_SWAPCACHE		13
+#define KPF_SWAPBACKED		14
+#define KPF_COMPOUND_HEAD	15
+#define KPF_COMPOUND_TAIL	16
+#define KPF_HUGE		17
+#define KPF_UNEVICTABLE		18
+#define KPF_NOPAGE		20
+
+/* kernel hacking assistances
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED		32
+#define KPF_MLOCKED		33
+#define KPF_MAPPEDTODISK	34
+#define KPF_PRIVATE		35
+#define KPF_PRIVATE_2		36
+#define KPF_OWNER_PRIVATE	37
+#define KPF_ARCH		38
+#define KPF_UNCACHED		39
+
+static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
+{
+	return ((kflags >> kbit) & 1) << ubit;
+}
+
+static u64 get_uflags(struct page *page)
+{
+	u64 k;
+	u64 u;
+
+	/*
+	 * pseudo flag: KPF_NOPAGE
+	 * it differentiates a memory hole from a page with no flags
+	 */
+	if (!page)
+		return 1 << KPF_NOPAGE;
+
+	k = page->flags;
+	u = 0;
+
+	/*
+	 * pseudo flags for the well known (anonymous) memory mapped pages
+	 *
+	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
+	 * simple test in page_mapped() is not enough.
+	 */
+	if (!PageSlab(page) && page_mapped(page))
+		u |= 1 << KPF_MMAP;
+	if (PageAnon(page))
+		u |= 1 << KPF_ANON;
+
+	/*
+	 * compound pages: export both head/tail info
+	 * they together define a compound page's start/end pos and order
+	 */
+	if (PageHead(page))
+		u |= 1 << KPF_COMPOUND_HEAD;
+	if (PageTail(page))
+		u |= 1 << KPF_COMPOUND_TAIL;
+	if (PageHuge(page))
+		u |= 1 << KPF_HUGE;
+
+	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
+
+	/*
+	 * Caveats on high order pages:
+	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
+	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
+	 */
+	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
+	u |= kpf_copy_bit(k, KPF_BUDDY,		PG_buddy);
+
+	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
+	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
+	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
+	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);
+
+	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
+	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
+	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
+	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);
+
+	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
+	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
+	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);
+#endif
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
+#endif
+
+	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
+	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
+	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
+	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
+	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
+	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
+
+	return u;
+};
 
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
 			     size_t count, loff_t *ppos)
@@ -94,7 +199,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 	unsigned long src = *ppos;
 	unsigned long pfn;
 	ssize_t ret = 0;
-	u64 kflags, uflags;
 
 	pfn = src / KPMSIZE;
 	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
@@ -106,24 +210,8 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 			ppage = pfn_to_page(pfn);
 		else
 			ppage = NULL;
-		if (!ppage)
-			kflags = 0;
-		else
-			kflags = ppage->flags;
-
-		uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
-			kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
-			kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
-			kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
-			kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
-			kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
-			kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
-			kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
-			kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
-			kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
-			kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
-
-		if (put_user(uflags, out)) {
+
+		if (put_user(get_uflags(ppage), out)) {
 			ret = -EFAULT;
 			break;
 		}
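As the changelog notes, exporting both KPF_COMPOUND_HEAD and KPF_COMPOUND_TAIL lets userspace recover a compound page's extent and order by scanning consecutive pfns: a head bit opens a run, the tail bits that follow belong to the same compound page, and the order is log2 of the run length. Below is a hedged sketch of that decoding, assuming the flag words have already been read into an array; the helper name is made up for illustration and is not part of this patch or the page-types tool.

#include <stdint.h>

#define KPF_COMPOUND_HEAD 15
#define KPF_COMPOUND_TAIL 16

/*
 * Illustrative helper, not from the patch: flags[0] is the kpageflags
 * word of a compound head page and nr words follow contiguously by pfn.
 * Count the head plus its trailing tail pages and return log2 of that
 * count, i.e. the compound page's order.
 */
static int compound_order_from_flags(const uint64_t *flags, unsigned long nr)
{
	unsigned long pages = 1;	/* the head page itself */
	int order = 0;

	while (pages < nr && (flags[pages] & (1ULL << KPF_COMPOUND_TAIL)))
		pages++;

	while ((1UL << order) < pages)
		order++;

	return order;
}

For a 2 MB huge page on x86, for example, the head is followed by 511 tail pages, so the sketch returns order 9.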