author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/arm/mm
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--    arch/arm/mm/copypage-v3.c      81
-rw-r--r--    arch/arm/mm/pageattr.c       1076
-rw-r--r--    arch/arm/mm/proc-arm6_7.S     323
-rw-r--r--    arch/arm/mm/tlb-v3.S           48
4 files changed, 1528 insertions, 0 deletions
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 00000000000..f72303e1d80
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,81 @@
/*
 *  linux/arch/arm/mm/copypage-v3.c
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv3 optimised copy_user_highpage
 *
 * FIXME: do we need to handle cache stuff...
 */
static void __naked
v3_copy_user_page(void *kto, const void *kfrom)
{
        asm("\n\
        stmfd   sp!, {r4, lr}                   @ 2\n\
        mov     r2, %2                          @ 1\n\
        ldmia   %0!, {r3, r4, ip, lr}           @ 4+1\n\
1:      stmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        ldmia   %0!, {r3, r4, ip, lr}           @ 4+1\n\
        stmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        ldmia   %0!, {r3, r4, ip, lr}           @ 4+1\n\
        stmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        ldmia   %0!, {r3, r4, ip, lr}           @ 4\n\
        subs    r2, r2, #1                      @ 1\n\
        stmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        ldmneia %0!, {r3, r4, ip, lr}           @ 4\n\
        bne     1b                              @ 1\n\
        ldmfd   sp!, {r4, pc}                   @ 3"
        :
        : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
}

void v3_copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kto = kmap_atomic(to, KM_USER0);
        kfrom = kmap_atomic(from, KM_USER1);
        v3_copy_user_page(kto, kfrom);
        kunmap_atomic(kfrom, KM_USER1);
        kunmap_atomic(kto, KM_USER0);
}

/*
 * ARMv3 optimised clear_user_highpage
 *
 * FIXME: do we need to handle cache stuff...
 */
void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
        asm volatile("\n\
        mov     r1, %2                          @ 1\n\
        mov     r2, #0                          @ 1\n\
        mov     r3, #0                          @ 1\n\
        mov     ip, #0                          @ 1\n\
        mov     lr, #0                          @ 1\n\
1:      stmia   %0!, {r2, r3, ip, lr}           @ 4\n\
        stmia   %0!, {r2, r3, ip, lr}           @ 4\n\
        stmia   %0!, {r2, r3, ip, lr}           @ 4\n\
        stmia   %0!, {r2, r3, ip, lr}           @ 4\n\
        subs    r1, r1, #1                      @ 1\n\
        bne     1b                              @ 1"
        : "=r" (ptr)
        : "0" (kaddr), "I" (PAGE_SIZE / 64)
        : "r1", "r2", "r3", "ip", "lr");
        kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns v3_user_fns __initdata = {
        .cpu_clear_user_highpage = v3_clear_user_highpage,
        .cpu_copy_user_highpage  = v3_copy_user_highpage,
};
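
The two helpers above are not called directly; the generic ARM highmem glue dispatches through the cpu_user_fns table selected from the matching proc_info entry at boot. A minimal sketch of that dispatch, simplified from the multi-CPU variant of arch/arm/include/asm/page.h (abridged here for orientation, not part of this patch):

        /* Sketch: per-CPU page helpers, as selected at boot (simplified). */
        struct cpu_user_fns {
                void (*cpu_clear_user_highpage)(struct page *page,
                                                unsigned long vaddr);
                void (*cpu_copy_user_highpage)(struct page *to,
                                               struct page *from,
                                               unsigned long vaddr,
                                               struct vm_area_struct *vma);
        };

        extern struct cpu_user_fns cpu_user;    /* points at v3_user_fns on ARMv3 */

        #define clear_user_highpage(page, vaddr) \
                cpu_user.cpu_clear_user_highpage(page, vaddr)
        #define copy_user_highpage(to, from, vaddr, vma) \
                cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma)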
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 00000000000..5f8071110e8
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,1076 @@
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

#ifdef CPA_DEBUG
#define cpa_debug(x, ...)  printk(x, __VA_ARGS__)
#else
#define cpa_debug(x, ...)
#endif

#define FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD 8
extern void v7_flush_kern_cache_all(void *);
extern void __flush_dcache_page(struct address_space *, struct page *);

static void inner_flush_cache_all(void)
{
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
}

#if defined(CONFIG_CPA)
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split:1;
        int             curpage;
        struct page     **pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) using cpa_lock, so that a CPU with stale large TLB entries
 * cannot change page attributes in parallel with another CPU that is
 * splitting a large page entry and changing its attributes.
 */
static DEFINE_MUTEX(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        unsigned long flags;

        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   direct_pages_count[PG_LEVEL_4K] << 2);
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                   direct_pages_count[PG_LEVEL_2M] << 11);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        flush_tlb_kernel_range(start, start + (numpages << PAGE_SHIFT));

        if (!cache)
                return;

        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && pte_present(*pte)) {
                        __cpuc_flush_dcache_area((void *) addr, PAGE_SIZE);
                        outer_flush_range(__pa((void *)addr),
                                          __pa((void *)addr) + PAGE_SIZE);
                }
        }
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        bool flush_inner = true;
        unsigned long base;

        BUG_ON(irqs_disabled());

        if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD &&
            cache && in_flags & CPA_PAGES_ARRAY) {
                inner_flush_cache_all();
                flush_inner = false;
        }

        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

                if (cache && in_flags & CPA_PAGES_ARRAY) {
                        /* cache flush all pages including high mem pages. */
                        if (flush_inner)
                                __flush_dcache_page(
                                        page_mapping(pages[i]), pages[i]);
                        base = page_to_phys(pages[i]);
                        outer_flush_range(base, base + PAGE_SIZE);
                } else if (cache) {
                        pte = lookup_address(addr, &level);

                        /*
                         * Only flush present addresses:
                         */
                        if (pte && pte_present(*pte)) {
                                __cpuc_flush_dcache_area((void *)addr,
                                                         PAGE_SIZE);
                                outer_flush_range(__pa((void *)addr),
                                                  __pa((void *)addr) + PAGE_SIZE);
                        }
                }
        }
}

/*
 * Certain areas of memory require very specific protection flags,
 * for example the kernel text. Callers don't always get this
 * right so this function checks and fixes these known static
 * required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= L_PTE_XN;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                prot |= L_PTE_RDONLY;

        /*
         * Mask off the forbidden bits and set the bits that are needed
         */
        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
                                         unsigned long ext_prot)
{
        pgprot_t ref_prot;

        ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

        if (pte & L_PTE_MT_BUFFERABLE)
                ref_prot |= PMD_SECT_BUFFERABLE;

        if (pte & L_PTE_MT_WRITETHROUGH)
                ref_prot |= PMD_SECT_CACHEABLE;

        if (pte & L_PTE_SHARED)
                ref_prot |= PMD_SECT_S;

        if (pte & L_PTE_XN)
                ref_prot |= PMD_SECT_XN;

        if (pte & L_PTE_RDONLY)
                ref_prot &= ~PMD_SECT_AP_WRITE;

        ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX |
                                 PTE_EXT_NG | (7 << 6))) << 6;

        return ref_prot;
}

static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
                                         unsigned long *ext_prot)
{
        pgprot_t ref_prot = 0;

        ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY;

        if (pmd & PMD_SECT_BUFFERABLE)
                ref_prot |= L_PTE_MT_BUFFERABLE;

        if (pmd & PMD_SECT_CACHEABLE)
                ref_prot |= L_PTE_MT_WRITETHROUGH;

        if (pmd & PMD_SECT_S)
                ref_prot |= L_PTE_SHARED;

        if (pmd & PMD_SECT_XN)
                ref_prot |= L_PTE_XN;

        if (pmd & PMD_SECT_AP_WRITE)
                ref_prot &= ~L_PTE_RDONLY;

        /* AP/APX/TEX bits */
        *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                            PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6;

        return ref_prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pte_t *pte;
        pmd_t *pmd;

        /* pmds are folded into pgds on ARM */
        *level = PG_LEVEL_NONE;

        if (pgd == NULL || pgd_none(*pgd))
                return NULL;

        pmd = pmd_offset(pgd, address);

        if (pmd == NULL || pmd_none(*pmd) || !pmd_present(*pmd))
                return NULL;

        if (((pmd_val(*pmd) & (PMD_TYPE_SECT | PMD_SECT_SUPER))
             == (PMD_TYPE_SECT | PMD_SECT_SUPER)) || !pmd_present(*pmd)) {

                return NULL;
        } else if (pmd_val(*pmd) & PMD_TYPE_SECT) {

                *level = PG_LEVEL_2M;
                return (pte_t *)pmd;
        }

        pte = pte_offset_kernel(pmd, address);

        if ((pte == NULL) || pte_none(*pte))
                return NULL;

        *level = PG_LEVEL_4K;

        return pte;
}
EXPORT_SYMBOL_GPL(lookup_address);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pmd_t *pmd, unsigned long address, pte_t *pte)
{
        struct page *page;

        cpa_debug("__set_pmd_pte %x %x %x\n", pmd, pte, *pte);

        /* change init_mm */
        pmd_populate_kernel(&init_mm, pmd, pte);

        /* change entry in all the pgd's */
        list_for_each_entry(page, &pgd_list, lru) {
                cpa_debug("list %x %x %x\n", (unsigned long)page,
                          (unsigned long)pgd_index(address), address);
                pmd = pmd_offset(((pgd_t *)page_address(page)) +
                                 pgd_index(address), address);
                pmd_populate_kernel(NULL, pmd, pte);
        }

}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t old_pte, *tmp;
        pgprot_t old_prot, new_prot, ext_prot, req_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        old_pte = *kpte;

        switch (level) {

        case PG_LEVEL_2M:
                psize = PMD_SIZE;
                pmask = PMD_MASK;
                break;

        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte),
                                                           &ext_prot);

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pmd_pfn(*kpte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(req_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protection() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
        pfn = pmd_pfn(old_pte);
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. numpages has been updated
         * above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * convert prot to pmd format
         */
        new_prot = pte_to_pmd_pgprot(new_prot, ext_prot);

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                phys_addr_t phys = __pfn_to_phys(pmd_pfn(*kpte));
                pmd_t *p = (pmd_t *)kpte;

                *kpte++ = __pmd(phys | new_prot);
                *kpte = __pmd((phys + SECTION_SIZE) | new_prot);
                flush_pmd_entry(p);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
                cpa_debug("preserving page at phys %x pmd %x\n", phys, p);
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot = 0, ext_prot = 0;
        int ret = 0;

        pbase = pte_alloc_one_kernel(&init_mm, address);
        if (!pbase)
                return -ENOMEM;

        cpa_debug("split_large_page %x PMD %x new pte @ %x\n", address,
                  *kpte, pbase);

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        /*
         * we only split 2MB entries for now
         */
        if (level != PG_LEVEL_2M) {
                ret = -EINVAL;
                goto out_unlock;
        }

        ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pmd_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte_ext(&pbase[i], pfn_pte(pfn, ref_prot), ext_prot);

        if (address >= (unsigned long)__va(0) &&
            address < (unsigned long)__va(lowmem_limit))
                split_page_count(level);

        /*
         * Install the new, split up pagetable.
         */
        __set_pmd_pte((pmd_t *)kpte, address, pbase);

        pbase = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (pbase)
                pte_free_kernel(&init_mm, pbase);

        spin_unlock_irqrestore(&pgd_lock, flags);

        return ret;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + lowmem_limit)) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                     *cpa->vaddr);

                return -EFAULT;
        }
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];

                if (unlikely(PageHighMem(page)))
                        return 0;

                address = (unsigned long)page_address(page);

        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE;
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, new_prot);
                cpa->pfn = pfn;

                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_ext(kpte, new_pte, 0);
                        /*
                         * FIXME : is this needed on arm?
                         * set_pte_ext already does a flush
                         */
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);

        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flags have been updated in
         * try_preserve_large_page:
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);

        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, which says
                 * "The TLBs may contain both ordinary and large-page
                 * translations for a 4-KByte range of linear addresses. This
                 * may occur if software modifies the paging structures so that
                 * the page size used for the address range changes. If the two
                 * translations differ with respect to page frame or attributes
                 * (e.g., permissions), processor behavior is undefined and may
                 * be implementation-specific."
                 *
                 * We do this global tlb flush inside the cpa_lock, so that we
                 * don't allow any other cpu, with stale tlb entries, to change
                 * the page attribute in parallel for an address that falls
                 * into the just-split large page entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (cpa->pfn >= (lowmem_limit >> PAGE_SHIFT))
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                     PAGE_OFFSET + lowmem_limit))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        mutex_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        mutex_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        /*
         * We need to flush the cache for all memory type changes
         * except when a page is being marked write back cacheable
         */
        return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK);
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;
        unsigned long baddr = 0;

        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
                /*
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
                baddr = *addr;
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for XN bit modifications */
        checkalias = (pgprot_val(mask_set) |
                      pgprot_val(mask_clr)) != L_PTE_XN;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        cache = cache_attr(mask_set);
        /*
         * Check whether we really changed something or whether
         * the cache needs to be flushed.
         */
        if (!(cpa.flags & CPA_FLUSHTLB) && !cache)
                goto out;

        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                cpa_flush_array(addr, numpages, cache,
                                cpa.flags, pages);
        } else
                cpa_flush_range(baddr, numpages, cache);

out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                                        (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                                        (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                                        CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                                        CPA_PAGES_ARRAY, pages);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages,
                                        __pgprot(L_PTE_MT_UNCACHED),
                                        __pgprot(L_PTE_MT_MASK), 0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_uc);

int _set_memory_array(unsigned long *addr, int addrinarray,
                      unsigned long set, unsigned long clr)
{
        return change_page_attr_set_clr(addr, addrinarray, __pgprot(set),
                                        __pgprot(clr), 0, CPA_ARRAY, NULL);
}

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray,
                                 L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_memory_array_uc);

int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray,
                                 L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_memory_array_wc);

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        ret = change_page_attr_set_clr(&addr, numpages,
                                       __pgprot(L_PTE_MT_BUFFERABLE),
                                       __pgprot(L_PTE_MT_MASK),
                                       0, 0, NULL);
        return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages,
                                        __pgprot(L_PTE_MT_WRITEBACK),
                                        __pgprot(L_PTE_MT_MASK),
                                        0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_iwb(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages,
                                        __pgprot(L_PTE_MT_INNER_WB),
                                        __pgprot(L_PTE_MT_MASK),
                                        0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_iwb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
        return change_page_attr_set_clr(addr, addrinarray,
                                        __pgprot(L_PTE_MT_WRITEBACK),
                                        __pgprot(L_PTE_MT_MASK),
                                        0, CPA_ARRAY, NULL);
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_array_iwb(unsigned long *addr, int addrinarray)
{
        return change_page_attr_set_clr(addr, addrinarray,
                                        __pgprot(L_PTE_MT_INNER_WB),
                                        __pgprot(L_PTE_MT_MASK),
                                        0, CPA_ARRAY, NULL);
}
EXPORT_SYMBOL(set_memory_array_iwb);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(L_PTE_XN), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(L_PTE_XN), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(L_PTE_RDONLY), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(L_PTE_RDONLY), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(L_PTE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

static int _set_pages_array(struct page **pages, int addrinarray,
                            unsigned long set, unsigned long clr)
{
        return change_page_attr_set_clr(NULL, addrinarray,
                                        __pgprot(set),
                                        __pgprot(clr),
                                        0, CPA_PAGES_ARRAY, pages);
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray,
                                L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray, L_PTE_MT_BUFFERABLE,
                                L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray,
                                L_PTE_MT_WRITEBACK, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_array_iwb(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray,
                                L_PTE_MT_INNER_WB, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_iwb);

#else /* CONFIG_CPA */

void update_page_count(int level, unsigned long pages)
{
}

static void flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        unsigned long base;

        if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD) {
                inner_flush_cache_all();
                flush_inner = false;
        }

        for (i = 0; i < numpages; i++) {
                if (flush_inner)
                        __flush_dcache_page(page_mapping(pages[i]), pages[i]);
                base = page_to_phys(pages[i]);
                outer_flush_range(base, base + PAGE_SIZE);
        }
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        flush_cache(pages, addrinarray);
        return 0;
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
        flush_cache(pages, addrinarray);
        return 0;
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_array_iwb(struct page **pages, int addrinarray)
{
        flush_cache(pages, addrinarray);
        return 0;
}
EXPORT_SYMBOL(set_pages_array_iwb);

#endif
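
The set_memory_*() entry points above mirror the x86 change_page_attr interface. A hypothetical caller that write-protects one lowmem page might look like the sketch below; the function name and buffer are illustrative only, not part of the patch, and error paths are trimmed:

        /* Hypothetical example of using the CPA API added above. */
        static int protect_one_page(void)
        {
                unsigned long addr = __get_free_page(GFP_KERNEL);
                int ret;

                if (!addr)
                        return -ENOMEM;

                /* Make the page read-only; a covering 2M section is split. */
                ret = set_memory_ro(addr, 1);
                if (ret)
                        goto out;

                /* ... read-only use of the page ... */

                ret = set_memory_rw(addr, 1);   /* restore before freeing */
        out:
                free_page(addr);
                return ret;
        }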
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
new file mode 100644
index 00000000000..e5b974cddac
--- /dev/null
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -0,0 +1,323 @@
/*
 *  linux/arch/arm/mm/proc-arm6,7.S
 *
 *  Copyright (C) 1997-2000 Russell King
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  These are the low level assembler for performing cache and TLB
 *  functions on the ARM610 & ARM710.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

ENTRY(cpu_arm6_dcache_clean_area)
ENTRY(cpu_arm7_dcache_clean_area)
        mov     pc, lr

/*
 * Function: arm6_7_data_abort ()
 *
 * Params  : r2 = pt_regs
 *         : r4 = aborted context pc
 *         : r5 = aborted context psr
 *
 * Purpose : obtain information about current aborted instruction
 *
 * Returns : r4-r5, r10-r11, r13 preserved
 */

ENTRY(cpu_arm7_data_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        ldr     r8, [r4]                        @ read arm instruction
        tst     r8, #1 << 20                    @ L = 0 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #15 << 24
        add     pc, pc, r7, lsr #22             @ Now branch to the relevant processing routine
        nop

/* 0 */ b       .data_unknown
/* 1 */ b       do_DataAbort                    @ swp
/* 2 */ b       .data_unknown
/* 3 */ b       .data_unknown
/* 4 */ b       .data_arm_lateldrpostconst      @ ldr   rd, [rn], #m
/* 5 */ b       .data_arm_lateldrpreconst       @ ldr   rd, [rn, #m]
/* 6 */ b       .data_arm_lateldrpostreg        @ ldr   rd, [rn], rm
/* 7 */ b       .data_arm_lateldrprereg         @ ldr   rd, [rn, rm]
/* 8 */ b       .data_arm_ldmstm                @ ldm*a rn, <rlist>
/* 9 */ b       .data_arm_ldmstm                @ ldm*b rn, <rlist>
/* a */ b       .data_unknown
/* b */ b       .data_unknown
/* c */ b       do_DataAbort                    @ ldc   rd, [rn], #m    @ Same as ldr   rd, [rn], #m
/* d */ b       do_DataAbort                    @ ldc   rd, [rn, #m]
/* e */ b       .data_unknown
/* f */
.data_unknown:  @ Part of jumptable
        mov     r0, r4
        mov     r1, r8
        b       baddataabort

ENTRY(cpu_arm6_data_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        ldr     r8, [r4]                        @ read arm instruction
        tst     r8, #1 << 20                    @ L = 0 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #14 << 24
        teq     r7, #8 << 24                    @ was it ldm/stm
        bne     do_DataAbort

.data_arm_ldmstm:
        tst     r8, #1 << 21                    @ check writeback bit
        beq     do_DataAbort                    @ no writeback -> no fixup
        mov     r7, #0x11
        orr     r7, r7, #0x1100
        and     r6, r8, r7
        and     r9, r8, r7, lsl #1
        add     r6, r6, r9, lsr #1
        and     r9, r8, r7, lsl #2
        add     r6, r6, r9, lsr #2
        and     r9, r8, r7, lsl #3
        add     r6, r6, r9, lsr #3
        add     r6, r6, r6, lsr #8
        add     r6, r6, r6, lsr #4
        and     r6, r6, #15                     @ r6 = no. of registers to transfer.
        and     r9, r8, #15 << 16               @ Extract 'n' from instruction
        ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6, lsl #2              @ Undo increment
        addeq   r7, r7, r6, lsl #2              @ Undo decrement
        str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
        b       do_DataAbort

.data_arm_apply_r6_and_rn:
        and     r9, r8, #15 << 16               @ Extract 'n' from instruction
        ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6                      @ Undo increment
        addeq   r7, r7, r6                      @ Undo decrement
        str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
        b       do_DataAbort

.data_arm_lateldrpreconst:
        tst     r8, #1 << 21                    @ check writeback bit
        beq     do_DataAbort                    @ no writeback -> no fixup
.data_arm_lateldrpostconst:
        movs    r6, r8, lsl #20                 @ Get offset
        beq     do_DataAbort                    @ zero -> no fixup
        and     r9, r8, #15 << 16               @ Extract 'n' from instruction
        ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6, lsr #20             @ Undo increment
        addeq   r7, r7, r6, lsr #20             @ Undo decrement
        str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
        b       do_DataAbort

.data_arm_lateldrprereg:
        tst     r8, #1 << 21                    @ check writeback bit
        beq     do_DataAbort                    @ no writeback -> no fixup
.data_arm_lateldrpostreg:
        and     r7, r8, #15                     @ Extract 'm' from instruction
        ldr     r6, [r2, r7, lsl #2]            @ Get register 'Rm'
        mov     r9, r8, lsr #7                  @ get shift count
        ands    r9, r9, #31
        and     r7, r8, #0x70                   @ get shift type
        orreq   r7, r7, #8                      @ shift count = 0
        add     pc, pc, r7
        nop

        mov     r6, r6, lsl r9                  @ 0: LSL #!0
        b       .data_arm_apply_r6_and_rn
        b       .data_arm_apply_r6_and_rn       @ 1: LSL #0
        nop
        b       .data_unknown                   @ 2: MUL?
        nop
        b       .data_unknown                   @ 3: MUL?
        nop
        mov     r6, r6, lsr r9                  @ 4: LSR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, lsr #32                 @ 5: LSR #32
        b       .data_arm_apply_r6_and_rn
        b       .data_unknown                   @ 6: MUL?
        nop
        b       .data_unknown                   @ 7: MUL?
        nop
        mov     r6, r6, asr r9                  @ 8: ASR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, asr #32                 @ 9: ASR #32
        b       .data_arm_apply_r6_and_rn
        b       .data_unknown                   @ A: MUL?
        nop
        b       .data_unknown                   @ B: MUL?
        nop
        mov     r6, r6, ror r9                  @ C: ROR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, rrx                     @ D: RRX
        b       .data_arm_apply_r6_and_rn
        b       .data_unknown                   @ E: MUL?
        nop
        b       .data_unknown                   @ F: MUL?

/*
 * Function: arm6_7_proc_init (void)
 *         : arm6_7_proc_fin (void)
 *
 * Notes   : This processor does not require these
 */
ENTRY(cpu_arm6_proc_init)
ENTRY(cpu_arm7_proc_init)
        mov     pc, lr

ENTRY(cpu_arm6_proc_fin)
ENTRY(cpu_arm7_proc_fin)
        mov     r0, #0x31                       @ ....S..DP...M
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        mov     pc, lr

ENTRY(cpu_arm6_do_idle)
ENTRY(cpu_arm7_do_idle)
        mov     pc, lr

/*
 * Function: arm6_7_switch_mm(unsigned long pgd_phys)
 * Params  : pgd_phys   Physical address of page table
 * Purpose : Perform a task switch, saving the old process's state and
 *           restoring the new.
 */
ENTRY(cpu_arm6_switch_mm)
ENTRY(cpu_arm7_switch_mm)
#ifdef CONFIG_MMU
        mov     r1, #0
        mcr     p15, 0, r1, c7, c0, 0           @ flush cache
        mcr     p15, 0, r0, c2, c0, 0           @ update page table ptr
        mcr     p15, 0, r1, c5, c0, 0           @ flush TLBs
#endif
        mov     pc, lr

/*
 * Function: arm6_7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
 * Params  : r0 = Address to set
 *         : r1 = value to set
 * Purpose : Set a PTE and flush it out of any WB cache
 */
        .align  5
ENTRY(cpu_arm6_set_pte_ext)
ENTRY(cpu_arm7_set_pte_ext)
#ifdef CONFIG_MMU
        armv3_set_pte_ext wc_disable=0
#endif /* CONFIG_MMU */
        mov     pc, lr

/*
 * Function: _arm6_7_reset
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset)
        mov     r1, #0
        mcr     p15, 0, r1, c7, c0, 0           @ flush cache
#ifdef CONFIG_MMU
        mcr     p15, 0, r1, c5, c0, 0           @ flush TLB
#endif
        mov     r1, #0x30
        mcr     p15, 0, r1, c1, c0, 0           @ turn off MMU etc
        mov     pc, r0

        __CPUINIT

        .type   __arm6_setup, #function
__arm6_setup:   mov     r0, #0
        mcr     p15, 0, r0, c7, c0              @ flush caches on v3
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c5, c0              @ flush TLBs on v3
        mov     r0, #0x3d                       @ . ..RS BLDP WCAM
        orr     r0, r0, #0x100                  @ . ..01 0011 1101
#else
        mov     r0, #0x3c                       @ . ..RS BLDP WCA.
#endif
        mov     pc, lr
        .size   __arm6_setup, . - __arm6_setup

        .type   __arm7_setup, #function
__arm7_setup:   mov     r0, #0
        mcr     p15, 0, r0, c7, c0              @ flush caches on v3
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c5, c0              @ flush TLBs on v3
        mcr     p15, 0, r0, c3, c0              @ load domain access register
        mov     r0, #0x7d                       @ . ..RS BLDP WCAM
        orr     r0, r0, #0x100                  @ . ..01 0111 1101
#else
        mov     r0, #0x7c                       @ . ..RS BLDP WCA.
#endif
        mov     pc, lr
        .size   __arm7_setup, . - __arm7_setup

        __INITDATA

        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort
        define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort

        .section ".rodata"

        string  cpu_arch_name, "armv3"
        string  cpu_elf_name, "v3"
        string  cpu_arm6_name, "ARM6"
        string  cpu_arm610_name, "ARM610"
        string  cpu_arm7_name, "ARM7"
        string  cpu_arm710_name, "ARM710"

        .align

        .section ".proc.info.init", #alloc, #execinstr

.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
        cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req
        .type   __\name\()_proc_info, #object
__\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   \cpu_mm_mmu_flags
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        b       \cpu_flush
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP | HWCAP_26BIT
        .long   \cpu_name
        .long   \cpu_proc_funcs
        .long   v3_tlb_fns
        .long   v3_user_fns
        .long   v3_cache_fns
        .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm

        arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \
                        0x00000c1e, __arm6_setup, arm6_processor_functions
        arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \
                        0x00000c1e, __arm6_setup, arm6_processor_functions
        arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \
                        0x00000c1e, __arm7_setup, arm7_processor_functions
        arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \
                        PMD_TYPE_SECT | \
                        PMD_SECT_BUFFERABLE | \
                        PMD_SECT_CACHEABLE | \
                        PMD_BIT4 | \
                        PMD_SECT_AP_WRITE | \
                        PMD_SECT_AP_READ, \
                        __arm7_setup, arm7_processor_functions
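
define_processor_functions expands to the arm6_processor_functions/arm7_processor_functions tables referenced above, laid out to match struct processor. A rough, abridged sketch of the relevant fields (from memory of arch/arm/include/asm/proc-fns.h of this era; the real struct has additional members such as suspend/resume hooks):

        /* Abridged sketch of struct processor (see asm/proc-fns.h). */
        struct processor {
                void (*_data_abort)(unsigned long pc);
                void (*_proc_init)(void);
                void (*_proc_fin)(void);
                void (*reset)(unsigned long addr);
                int  (*_do_idle)(void);
                void (*dcache_clean_area)(void *addr, int size);
                void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
                void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
                /* ... further members elided ... */
        };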
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
new file mode 100644
index 00000000000..d253995ec4c
--- /dev/null
+++ b/arch/arm/mm/tlb-v3.S
@@ -0,0 +1,48 @@
/*
 *  linux/arch/arm/mm/tlb-v3.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARM architecture version 3 TLB handling functions.
 *
 * Processors: ARM610, ARM710.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

        .align  5
/*
 *      v3_flush_user_tlb_range(start, end, mm)
 *
 *      Invalidate a range of TLB entries in the specified address space.
 *
 *      - start - range start address
 *      - end   - range end address
 *      - mm    - mm_struct describing address space
 */
        .align  5
ENTRY(v3_flush_user_tlb_range)
        vma_vm_mm r2, r2
        act_mm  r3                              @ get current->active_mm
        teq     r2, r3                          @ == mm ?
        movne   pc, lr                          @ no, we don't do anything
ENTRY(v3_flush_kern_tlb_range)
        bic     r0, r0, #0x0ff
        bic     r0, r0, #0xf00
1:      mcr     p15, 0, r0, c6, c0, 0           @ invalidate TLB entry
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        mov     pc, lr

        __INITDATA

        /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
        define_tlb_functions v3, v3_tlb_flags
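
For reference, define_tlb_functions emits the v3_tlb_fns object that the proc_info entries in proc-arm6_7.S point at, matching struct cpu_tlb_fns. Its shape, roughly as declared in arch/arm/include/asm/tlbflush.h (shown as a hedged sketch, not verbatim):

        /* Sketch of the table define_tlb_functions fills in (see asm/tlbflush.h). */
        struct cpu_tlb_fns {
                void (*flush_user_range)(unsigned long start, unsigned long end,
                                         struct vm_area_struct *vma);
                void (*flush_kern_range)(unsigned long start, unsigned long end);
                unsigned long tlb_flags;
        };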