author     Ingo Molnar <mingo@elte.hu>    2008-01-30 07:33:55 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-01-30 07:33:55 -0500
commit     78c94abaea55df7003f3ad0e5b6c78ee1cc860bb
tree       50ce3cf5378ed5bf0dcd84a5f28bde598a6b78bc /arch/x86/mm/pageattr_32.c
parent     a2172e2586f6662af996e47f417bb718c37cf8d2
x86: simplify the 32-bit cpa code
simplify the 32-bit cpa code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/pageattr_32.c')
-rw-r--r--  arch/x86/mm/pageattr_32.c | 179
1 file changed, 56 insertions(+), 123 deletions(-)
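Besides deleting the cpa_lock/df_list deferred-flush machinery, the patch below adds a new entry point, change_page_attr_addr(), which takes a kernel virtual address instead of a struct page. A minimal usage sketch, not part of the commit: the helper my_set_ro() and its caller are invented, and the prototypes are assumed to be declared in <asm/cacheflush.h> as in this era's tree.

#include <linux/mm.h>
#include <asm/cacheflush.h>     /* change_page_attr_addr(), global_flush_tlb() */

/*
 * Hypothetical helper: write-protect @numpages directly-mapped pages,
 * identified by kernel virtual address. change_page_attr_addr() stops
 * at the first invalid pfn and applies the change only to the pages it
 * validated; the TLBs are flushed separately via global_flush_tlb().
 */
static int my_set_ro(unsigned long addr, int numpages)
{
        int err = change_page_attr_addr(addr, numpages, PAGE_KERNEL_RO);

        if (!err)
                global_flush_tlb();
        return err;
}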
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 66688a630839..570a37bf1401 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -15,9 +15,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
-static DEFINE_SPINLOCK(cpa_lock);
-static struct list_head df_list = LIST_HEAD_INIT(df_list);
-
 pte_t *lookup_address(unsigned long address, int *level)
 {
         pgd_t *pgd = pgd_offset_k(address);
@@ -48,9 +45,7 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
         pte_t *pbase;
         int i;
 
-        spin_unlock_irq(&cpa_lock);
         base = alloc_pages(GFP_KERNEL, 0);
-        spin_lock_irq(&cpa_lock);
         if (!base)
                 return NULL;
 
@@ -58,9 +53,6 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
          * page_private is used to track the number of entries in
          * the page table page that have non standard attributes.
          */
-        SetPagePrivate(base);
-        page_private(base) = 0;
-
         address = __pa(address);
         addr = address & LARGE_PAGE_MASK;
         pbase = (pte_t *)page_address(base);
@@ -73,36 +65,6 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
         return base;
 }
 
-static void cache_flush_page(struct page *p)
-{
-        void *addr = page_address(p);
-        int i;
-
-        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-                clflush(addr + i);
-}
-
-static void flush_kernel_map(void *arg)
-{
-        struct list_head *lh = (struct list_head *)arg;
-        struct page *p;
-
-        /*
-         * Flush all to work around Errata in early athlons regarding
-         * large page flushing.
-         */
-        __flush_tlb_all();
-
-        /* High level code is not ready for clflush yet */
-        if (0 && cpu_has_clflush) {
-                list_for_each_entry(p, lh, lru)
-                        cache_flush_page(p);
-        } else {
-                if (boot_cpu_data.x86_model >= 4)
-                        wbinvd();
-        }
-}
-
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
         unsigned long flags;
@@ -127,36 +89,12 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
         spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-/*
- * No more special protections in this 2/4MB area - revert to a large
- * page again.
- */
-static inline void revert_page(struct page *kpte_page, unsigned long address)
-{
-        pgprot_t ref_prot;
-        pte_t *linear;
-
-        ref_prot =
-        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
-
-        linear = (pte_t *)
-                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
-        set_pmd_pte(linear, address,
-                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
-                            ref_prot));
-}
-
-static inline void save_page(struct page *kpte_page)
-{
-        if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
-                list_add(&kpte_page->lru, &df_list);
-}
-
 static int __change_page_attr(struct page *page, pgprot_t prot)
 {
+        pgprot_t ref_prot = PAGE_KERNEL;
         struct page *kpte_page;
         unsigned long address;
+        pgprot_t oldprot;
         pte_t *kpte;
         int level;
 
@@ -167,58 +105,41 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
         if (!kpte)
                 return -EINVAL;
 
+        oldprot = pte_pgprot(*kpte);
         kpte_page = virt_to_page(kpte);
         BUG_ON(PageLRU(kpte_page));
         BUG_ON(PageCompound(kpte_page));
 
-        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
-                if (level == 3) {
-                        set_pte_atomic(kpte, mk_pte(page, prot));
-                } else {
-                        struct page *split;
-                        pgprot_t ref_prot;
-
-                        ref_prot =
-                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
-                        split = split_large_page(address, prot, ref_prot);
-                        if (!split)
-                                return -ENOMEM;
-
-                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
-                        kpte_page = split;
-                }
-                page_private(kpte_page)++;
-        } else {
-                if (level == 3) {
-                        set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
-                        BUG_ON(page_private(kpte_page) == 0);
-                        page_private(kpte_page)--;
-                } else
-                        BUG();
-        }
-
         /*
-         * If the pte was reserved, it means it was created at boot
-         * time (not via split_large_page) and in turn we must not
-         * replace it with a largepage.
+         * Better fail early if someone sets the kernel text to NX.
+         * Does not cover __inittext
          */
+        BUG_ON(address >= (unsigned long)&_text &&
+                address < (unsigned long)&_etext &&
+                (pgprot_val(prot) & _PAGE_NX));
 
-        save_page(kpte_page);
-        if (!PageReserved(kpte_page)) {
-                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
-                        paravirt_release_pt(page_to_pfn(kpte_page));
-                        revert_page(kpte_page, address);
-                }
+        if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+                ref_prot = PAGE_KERNEL_EXEC;
+
+        ref_prot = canon_pgprot(ref_prot);
+        prot = canon_pgprot(prot);
+
+        if (level == 3) {
+                set_pte_atomic(kpte, mk_pte(page, prot));
+        } else {
+                struct page *split;
+                split = split_large_page(address, prot, ref_prot);
+                if (!split)
+                        return -ENOMEM;
+
+                /*
+                 * There's a small window here to waste a bit of RAM:
+                 */
+                set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
         }
         return 0;
 }
 
-static inline void flush_map(struct list_head *l)
-{
-        on_each_cpu(flush_kernel_map, l, 1, 1);
-}
-
 /*
  * Change the page attributes of an page in the linear mapping.
  *
@@ -234,40 +155,52 @@ static inline void flush_map(struct list_head *l)
  */
 int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
-        unsigned long flags;
         int err = 0, i;
 
-        spin_lock_irqsave(&cpa_lock, flags);
         for (i = 0; i < numpages; i++, page++) {
                 err = __change_page_attr(page, prot);
                 if (err)
                         break;
         }
-        spin_unlock_irqrestore(&cpa_lock, flags);
 
         return err;
 }
 EXPORT_SYMBOL(change_page_attr);
 
-void global_flush_tlb(void)
+int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
 {
-        struct page *pg, *next;
-        struct list_head l;
+        int i;
+        unsigned long pfn = (addr >> PAGE_SHIFT);
 
+        for (i = 0; i < numpages; i++) {
+                if (!pfn_valid(pfn + i)) {
+                        break;
+                } else {
+                        int level;
+                        pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
+                        BUG_ON(pte && !pte_none(*pte));
+                }
+        }
+        return change_page_attr(virt_to_page(addr), i, prot);
+}
+
+static void flush_kernel_map(void *arg)
+{
+        /*
+         * Flush all to work around Errata in early athlons regarding
+         * large page flushing.
+         */
+        __flush_tlb_all();
+
+        if (boot_cpu_data.x86_model >= 4)
+                wbinvd();
+}
+
+void global_flush_tlb(void)
+{
         BUG_ON(irqs_disabled());
 
-        spin_lock_irq(&cpa_lock);
-        list_replace_init(&df_list, &l);
-        spin_unlock_irq(&cpa_lock);
-        flush_map(&l);
-        list_for_each_entry_safe(pg, next, &l, lru) {
-                list_del(&pg->lru);
-                clear_bit(PG_arch_1, &pg->flags);
-                if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
-                        continue;
-                ClearPagePrivate(pg);
-                __free_page(pg);
-        }
+        on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 EXPORT_SYMBOL(global_flush_tlb);
 
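For context on how the interface this file exports is consumed: callers of this era batch their change_page_attr() calls and then issue a single global_flush_tlb(), which after this patch is an unconditional cross-CPU __flush_tlb_all() plus wbinvd() rather than a walk of the old deferred-free list. A hedged sketch, not from the commit: the two helpers below are hypothetical, and the prototypes are assumed to come from <asm/cacheflush.h>.

#include <linux/mm.h>
#include <asm/cacheflush.h>     /* change_page_attr(), global_flush_tlb() */

/* Hypothetical caller: map @n pages uncached before handing them to a device. */
static int my_make_uncached(struct page *pg, int n)
{
        int err = change_page_attr(pg, n, PAGE_KERNEL_NOCACHE);

        if (err)
                return err;
        global_flush_tlb();     /* propagate the new attributes to all CPUs */
        return 0;
}

/* And restore the default kernel mapping when done: */
static void my_make_cached(struct page *pg, int n)
{
        change_page_attr(pg, n, PAGE_KERNEL);
        global_flush_tlb();
}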