path: root/arch/x86/mm/pageattr_32.c
author	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:41 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:41 -0500
commit	9f4c815ce7ab53150f17c97a382f136a8fb68044 (patch)
tree	3a160096751441f2a12e79e3f78f7e808ae3bda9 /arch/x86/mm/pageattr_32.c
parent	6371b495991debfd1417b17c2bc4f7d7bae05739 (diff)
x86: clean up arch/x86/mm/pageattr_32.c
clean up arch/x86/mm/pageattr_32.c.

no code changed:

   text    data     bss     dec     hex filename
   1255      40       0    1295     50f pageattr_32.o.before
   1255      40       0    1295     50f pageattr_32.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/pageattr_32.c')
-rw-r--r--	arch/x86/mm/pageattr_32.c	151
1 files changed, 83 insertions, 68 deletions
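
For orientation before reading the hunks, here is a sketch of how the top of the file reads once the patch is applied, reconstructed from the diff below. The four lines of lookup_address() that fall between the first two hunks are not shown in the diff and are filled in from the surrounding file, so treat them as an assumption. The sketch illustrates the conventions the cleanup applies: <linux/...> includes grouped ahead of <asm/...>, and a blank line separating local declarations from the first statement.

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	/* the next four lines fall between the diff hunks; filled in from the file */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	return pte_offset_kernel(pmd, address);
}
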
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 260073c07600..be4656403d77 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -1,28 +1,29 @@
 /*
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
  */
 
-#include <linux/mm.h>
-#include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/mm.h>
+
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/sections.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
 
 static DEFINE_SPINLOCK(cpa_lock);
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
 
-
-pte_t *lookup_address(unsigned long address) 
-{ 
+pte_t *lookup_address(unsigned long address)
+{
 	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;
 	pmd_t *pmd;
+
 	if (pgd_none(*pgd))
 		return NULL;
 	pud = pud_offset(pgd, address);
@@ -33,21 +34,22 @@ pte_t *lookup_address(unsigned long address)
 		return NULL;
 	if (pmd_large(*pmd))
 		return (pte_t *)pmd;
-	return pte_offset_kernel(pmd, address);
-}
 
-static struct page *split_large_page(unsigned long address, pgprot_t prot,
-					pgprot_t ref_prot)
-{
-	int i;
+	return pte_offset_kernel(pmd, address);
+}
+
+static struct page *
+split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
+{
 	unsigned long addr;
 	struct page *base;
 	pte_t *pbase;
+	int i;
 
 	spin_unlock_irq(&cpa_lock);
 	base = alloc_pages(GFP_KERNEL, 0);
 	spin_lock_irq(&cpa_lock);
 	if (!base)
 		return NULL;
 
 	/*
@@ -58,22 +60,24 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	page_private(base) = 0;
 
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
 					addr == address ? prot : ref_prot));
 	}
 	return base;
 }
 
 static void cache_flush_page(struct page *p)
 {
-	void *adr = page_address(p);
+	void *addr = page_address(p);
 	int i;
+
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(adr+i);
+		clflush(addr + i);
 }
 
 static void flush_kernel_map(void *arg)
@@ -83,23 +87,27 @@ static void flush_kernel_map(void *arg)
 
 	/* High level code is not ready for clflush yet */
 	if (0 && cpu_has_clflush) {
-		list_for_each_entry (p, lh, lru)
+		list_for_each_entry(p, lh, lru)
 			cache_flush_page(p);
-	} else if (boot_cpu_data.x86_model >= 4)
-		wbinvd();
+	} else {
+		if (boot_cpu_data.x86_model >= 4)
+			wbinvd();
+	}
 
-	/* Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
+	/*
+	 * Flush all to work around Errata in early athlons regarding
+	 * large page flushing.
 	 */
 	__flush_tlb_all();
 }
 
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
-	struct page *page;
 	unsigned long flags;
+	struct page *page;
 
-	set_pte_atomic(kpte, pte); /* change init_mm */
+	/* change init_mm */
+	set_pte_atomic(kpte, pte);
 	if (SHARED_KERNEL_PMD)
 		return;
 
@@ -108,6 +116,7 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 		pgd_t *pgd;
 		pud_t *pud;
 		pmd_t *pmd;
+
 		pgd = (pgd_t *)page_address(page) + pgd_index(address);
 		pud = pud_offset(pgd, address);
 		pmd = pmd_offset(pud, address);
@@ -116,9 +125,9 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 /*
- * No more special protections in this 2/4MB area - revert to a
- * large page again.
+ * No more special protections in this 2/4MB area - revert to a large
+ * page again.
  */
 static inline void revert_page(struct page *kpte_page, unsigned long address)
 {
@@ -142,12 +151,11 @@ static inline void save_page(struct page *kpte_page)
 	list_add(&kpte_page->lru, &df_list);
 }
 
-static int
-__change_page_attr(struct page *page, pgprot_t prot)
-{
-	pte_t *kpte;
-	unsigned long address;
+static int __change_page_attr(struct page *page, pgprot_t prot)
+{
 	struct page *kpte_page;
+	unsigned long address;
+	pte_t *kpte;
 
 	BUG_ON(PageHighMem(page));
 	address = (unsigned long)page_address(page);
@@ -155,16 +163,17 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	kpte = lookup_address(address);
 	if (!kpte)
 		return -EINVAL;
+
 	kpte_page = virt_to_page(kpte);
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
 
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
 		if (!pte_huge(*kpte)) {
 			set_pte_atomic(kpte, mk_pte(page, prot));
 		} else {
-			pgprot_t ref_prot;
 			struct page *split;
+			pgprot_t ref_prot;
 
 			ref_prot =
 			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
@@ -172,16 +181,19 @@ __change_page_attr(struct page *page, pgprot_t prot)
 			split = split_large_page(address, prot, ref_prot);
 			if (!split)
 				return -ENOMEM;
-			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
+
+			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
 			kpte_page = split;
 		}
 		page_private(kpte_page)++;
-	} else if (!pte_huge(*kpte)) {
-		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
-		BUG_ON(page_private(kpte_page) == 0);
-		page_private(kpte_page)--;
-	} else
-		BUG();
+	} else {
+		if (!pte_huge(*kpte)) {
+			set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
+			BUG_ON(page_private(kpte_page) == 0);
+			page_private(kpte_page)--;
+		} else
+			BUG();
+	}
 
 	/*
 	 * If the pte was reserved, it means it was created at boot
@@ -197,7 +209,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
 		}
 	}
 	return 0;
-} 
+}
 
 static inline void flush_map(struct list_head *l)
 {
@@ -211,32 +223,33 @@ static inline void flush_map(struct list_head *l)
  * than write-back somewhere - some CPUs do not like it when mappings with
  * different caching policies exist. This changes the page attributes of the
  * in kernel linear mapping too.
  *
  * The caller needs to ensure that there are no conflicting mappings elsewhere.
  * This function only deals with the kernel linear map.
  *
  * Caller must call global_flush_tlb() after this.
  */
 int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
-	int err = 0;
-	int i;
 	unsigned long flags;
+	int err = 0, i;
 
 	spin_lock_irqsave(&cpa_lock, flags);
 	for (i = 0; i < numpages; i++, page++) {
 		err = __change_page_attr(page, prot);
 		if (err)
 			break;
 	}
 	spin_unlock_irqrestore(&cpa_lock, flags);
+
 	return err;
 }
+EXPORT_SYMBOL(change_page_attr);
 
 void global_flush_tlb(void)
 {
-	struct list_head l;
 	struct page *pg, *next;
+	struct list_head l;
 
 	BUG_ON(irqs_disabled());
 
@@ -253,26 +266,28 @@ void global_flush_tlb(void)
 		__free_page(pg);
 	}
 }
+EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
 		return;
-	if (!enable)
+	if (!enable) {
 		debug_check_no_locks_freed(page_address(page),
 					numpages * PAGE_SIZE);
+	}
 
-	/* the return value is ignored - the calls cannot fail,
+	/*
+	 * the return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time.
 	 */
 	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
-	/* we should perform an IPI and flush all tlbs,
+
+	/*
+	 * we should perform an IPI and flush all tlbs,
 	 * but that can deadlock->flush only current cpu.
 	 */
 	__flush_tlb_all();
 }
 #endif
-
-EXPORT_SYMBOL(change_page_attr);
-EXPORT_SYMBOL(global_flush_tlb);
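
A recurring pattern in the cleanup is turning a "} else if (...)" tail into an explicit else block with the test nested inside it; flush_kernel_map() and __change_page_attr() both get this treatment. Reconstructed from the hunks above, a sketch of how the tail of the large if/else in __change_page_attr() reads after the patch:

		/* ... large-page split handled above ... */
		page_private(kpte_page)++;
	} else {
		if (!pte_huge(*kpte)) {
			set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
			BUG_ON(page_private(kpte_page) == 0);
			page_private(kpte_page)--;
		} else
			BUG();
	}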