author     Ingo Molnar <mingo@elte.hu>    2008-01-30 07:33:41 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-01-30 07:33:41 -0500
commit     b4416a1be86b0c7bdde4e6ba526715c1a055746f
tree       6aa9c06dbd26fc98209e07b2c8ecd1dbbecfdfa9
parent     9f4c815ce7ab53150f17c97a382f136a8fb68044
x86: clean up arch/x86/mm/pageattr_64.c
clean up arch/x86/mm/pageattr_64.c.
no code changed:
   text    data     bss     dec     hex filename
   1751      16       0    1767     6e7 pageattr_64.o.before
   1751      16       0    1767     6e7 pageattr_64.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/mm/pageattr_64.c  143
1 file changed, 81 insertions(+), 62 deletions(-)
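
The size figures in the commit message are the output of binutils' size(1).
As a minimal sketch of how the "no code changed" check can be reproduced,
assuming the object file was copied aside before and after the patch:

    $ size pageattr_64.o.before pageattr_64.o.after

Matching text/data/bss totals show the object neither grew nor shrank; a
stricter check would also compare objdump -d output for the two objects.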
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index c40afbaaf93d..14ab327cde0c 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -1,48 +1,54 @@
 /*
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
  */
 
-#include <linux/mm.h>
-#include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/mm.h>
+
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/uaccess.h>
 #include <asm/io.h>
 
 pte_t *lookup_address(unsigned long address)
 {
         pgd_t *pgd = pgd_offset_k(address);
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
+
         if (pgd_none(*pgd))
                 return NULL;
         pud = pud_offset(pgd, address);
         if (!pud_present(*pud))
                 return NULL;
         pmd = pmd_offset(pud, address);
         if (!pmd_present(*pmd))
                 return NULL;
         if (pmd_large(*pmd))
                 return (pte_t *)pmd;
+
         pte = pte_offset_kernel(pmd, address);
         if (pte && !pte_present(*pte))
                 pte = NULL;
+
         return pte;
 }
 
-static struct page *split_large_page(unsigned long address, pgprot_t prot,
-                                     pgprot_t ref_prot)
+static struct page *
+split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
 {
-        int i;
         unsigned long addr;
-        struct page *base = alloc_pages(GFP_KERNEL, 0);
+        struct page *base;
         pte_t *pbase;
-        if (!base)
+        int i;
+
+        base = alloc_pages(GFP_KERNEL, 0);
+        if (!base)
                 return NULL;
         /*
          * page_private is used to track the number of entries in
@@ -52,20 +58,21 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
         page_private(base) = 0;
 
         address = __pa(address);
         addr = address & LARGE_PAGE_MASK;
         pbase = (pte_t *)page_address(base);
         for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                 pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                    addr == address ? prot : ref_prot);
         }
         return base;
 }
 
-void clflush_cache_range(void *adr, int size)
+void clflush_cache_range(void *addr, int size)
 {
         int i;
+
         for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-                clflush(adr+i);
+                clflush(addr+i);
 }
 
 static void flush_kernel_map(void *arg)
@@ -76,17 +83,20 @@ static void flush_kernel_map(void *arg)
         /* When clflush is available always use it because it is
            much cheaper than WBINVD. */
         /* clflush is still broken. Disable for now. */
-        if (1 || !cpu_has_clflush)
+        if (1 || !cpu_has_clflush) {
                 asm volatile("wbinvd" ::: "memory");
-        else list_for_each_entry(pg, l, lru) {
-                void *adr = page_address(pg);
-                clflush_cache_range(adr, PAGE_SIZE);
+        } else {
+                list_for_each_entry(pg, l, lru) {
+                        void *addr = page_address(pg);
+
+                        clflush_cache_range(addr, PAGE_SIZE);
+                }
         }
         __flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
 {
         on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
@@ -98,52 +108,56 @@ static inline void save_page(struct page *fpage)
         list_add(&fpage->lru, &deferred_pages);
 }
 
 /*
  * No more special protections in this 2/4MB area - revert to a
  * large page again.
  */
 static void revert_page(unsigned long address, pgprot_t ref_prot)
 {
+        unsigned long pfn;
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
         pte_t large_pte;
-        unsigned long pfn;
 
         pgd = pgd_offset_k(address);
         BUG_ON(pgd_none(*pgd));
-        pud = pud_offset(pgd,address);
+        pud = pud_offset(pgd, address);
         BUG_ON(pud_none(*pud));
         pmd = pmd_offset(pud, address);
         BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
         pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
         large_pte = pfn_pte(pfn, ref_prot);
         large_pte = pte_mkhuge(large_pte);
+
         set_pte((pte_t *)pmd, large_pte);
 }
 
 static int
 __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                    pgprot_t ref_prot)
 {
-        pte_t *kpte;
         struct page *kpte_page;
         pgprot_t ref_prot2;
+        pte_t *kpte;
 
         kpte = lookup_address(address);
-        if (!kpte) return 0;
+        if (!kpte)
+                return 0;
+
         kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
         BUG_ON(PageLRU(kpte_page));
         BUG_ON(PageCompound(kpte_page));
         if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                 if (!pte_huge(*kpte)) {
                         set_pte(kpte, pfn_pte(pfn, prot));
                 } else {
                         /*
                          * split_large_page will take the reference for this
                          * change_page_attr on the split page.
                          */
                         struct page *split;
+
                         ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                         split = split_large_page(address, prot, ref_prot2);
                         if (!split)
@@ -153,21 +167,23 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                         kpte_page = split;
                 }
                 page_private(kpte_page)++;
-        } else if (!pte_huge(*kpte)) {
-                set_pte(kpte, pfn_pte(pfn, ref_prot));
-                BUG_ON(page_private(kpte_page) == 0);
-                page_private(kpte_page)--;
-        } else
-                BUG();
+        } else {
+                if (!pte_huge(*kpte)) {
+                        set_pte(kpte, pfn_pte(pfn, ref_prot));
+                        BUG_ON(page_private(kpte_page) == 0);
+                        page_private(kpte_page)--;
+                } else
+                        BUG();
+        }
 
         /* on x86-64 the direct mapping set at boot is not using 4k pages */
         BUG_ON(PageReserved(kpte_page));
 
         save_page(kpte_page);
         if (page_private(kpte_page) == 0)
                 revert_page(address, ref_prot);
         return 0;
 }
 
 /*
  * Change the page attributes of an page in the linear mapping.
@@ -176,19 +192,19 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
  * than write-back somewhere - some CPUs do not like it when mappings with
  * different caching policies exist. This changes the page attributes of the
  * in kernel linear mapping too.
  *
  * The caller needs to ensure that there are no conflicting mappings elsewhere.
  * This function only deals with the kernel linear map.
  *
  * Caller must call global_flush_tlb() after this.
  */
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-        int err = 0, kernel_map = 0;
-        int i;
+        int err = 0, kernel_map = 0, i;
+
+        if (address >= __START_KERNEL_map &&
+            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-        if (address >= __START_KERNEL_map
-            && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                 address = (unsigned long)__va(__pa(address));
                 kernel_map = 1;
         }
@@ -198,7 +214,8 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
                 unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
                 if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-                        err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+                        err = __change_page_attr(address, pfn, prot,
+                                                 PAGE_KERNEL);
                         if (err)
                                 break;
                 }
@@ -207,14 +224,16 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
                 if (__pa(address) < KERNEL_TEXT_SIZE) {
                         unsigned long addr2;
                         pgprot_t prot2;
+
                         addr2 = __START_KERNEL_map + __pa(address);
                         /* Make sure the kernel mappings stay executable */
                         prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                         err = __change_page_attr(addr2, pfn, prot2,
                                                  PAGE_KERNEL_EXEC);
                 }
         }
         up_write(&init_mm.mmap_sem);
+
         return err;
 }
 
@@ -222,11 +241,13 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
         unsigned long addr = (unsigned long)page_address(page);
+
         return change_page_attr_addr(addr, numpages, prot);
 }
+EXPORT_SYMBOL(change_page_attr);
 
 void global_flush_tlb(void)
 {
         struct page *pg, *next;
         struct list_head l;
 
@@ -248,8 +269,6 @@ void global_flush_tlb(void)
                         continue;
                 ClearPagePrivate(pg);
                 __free_page(pg);
         }
 }
-
-EXPORT_SYMBOL(change_page_attr);
 EXPORT_SYMBOL(global_flush_tlb);
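
Reviewer's note: the comment block kept above change_page_attr_addr()
requires callers to invoke global_flush_tlb() after changing attributes.
A minimal, hypothetical caller sketch, not part of this patch; it assumes
a kernel of this vintage, where change_page_attr() and global_flush_tlb()
are declared in <asm/cacheflush.h> and PAGE_KERNEL_RO/PAGE_KERNEL are the
usual x86-64 protections:

        #include <linux/mm.h>
        #include <asm/cacheflush.h>

        /* Write-protect one kernel page in the linear map, then restore it. */
        static void toggle_page_ro(struct page *page)
        {
                /* Rewrite the linear-mapping PTE(s) covering this page. */
                change_page_attr(page, 1, PAGE_KERNEL_RO);
                /* Mandatory after change_page_attr(): flush stale TLB entries. */
                global_flush_tlb();

                change_page_attr(page, 1, PAGE_KERNEL);
                global_flush_tlb();
        }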