path: root/arch/arm/mm/pageattr.c
author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
commit     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree       a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/arm/mm/pageattr.c
parent     8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

Added missing tegra files. (HEAD, master)
Diffstat (limited to 'arch/arm/mm/pageattr.c')
-rw-r--r--  arch/arm/mm/pageattr.c  1076
1 file changed, 1076 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 00000000000..5f8071110e8
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,1076 @@
1/*
2 * Copyright 2002 Andi Kleen, SuSE Labs.
3 * Thanks to Ben LaHaise for precious feedback.
4 */
5#include <linux/highmem.h>
6#include <linux/bootmem.h>
7#include <linux/module.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/interrupt.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/pfn.h>
14#include <linux/percpu.h>
15#include <linux/gfp.h>
16#include <linux/vmalloc.h>
17#include <linux/mutex.h>
18
19#include <asm/processor.h>
20#include <asm/tlbflush.h>
21#include <asm/sections.h>
22#include <asm/setup.h>
23#include <asm/uaccess.h>
24#include <asm/pgalloc.h>
25
26#ifdef CPA_DEBUG
27#define cpa_debug(x, ...) printk(x, __VA_ARGS__)
28#else
29#define cpa_debug(x, ...)
30#endif
31
32#define FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD 8
33extern void v7_flush_kern_cache_all(void *);
34extern void __flush_dcache_page(struct address_space *, struct page *);
35
36static void inner_flush_cache_all(void)
37{
38 on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
39}
40
41#if defined(CONFIG_CPA)
42/*
43 * The current flushing context - we pass it instead of 5 arguments:
44 */
45struct cpa_data {
46 unsigned long *vaddr;
47 pgprot_t mask_set;
48 pgprot_t mask_clr;
49 int numpages;
50 int flags;
51 unsigned long pfn;
52 unsigned force_split:1;
53 int curpage;
54 struct page **pages;
55};
56
57/*
58 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
59 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
60 * entries to change the page attributes in parallel while another CPU is
61 * splitting a large page entry and changing the attributes.
62 */
63static DEFINE_MUTEX(cpa_lock);
64
65#define CPA_FLUSHTLB 1
66#define CPA_ARRAY 2
67#define CPA_PAGES_ARRAY 4
68
69#ifdef CONFIG_PROC_FS
70static unsigned long direct_pages_count[PG_LEVEL_NUM];
71
72void update_page_count(int level, unsigned long pages)
73{
74 unsigned long flags;
75
76 /* Protect against CPA */
77 spin_lock_irqsave(&pgd_lock, flags);
78 direct_pages_count[level] += pages;
79 spin_unlock_irqrestore(&pgd_lock, flags);
80}
81
82static void split_page_count(int level)
83{
84 direct_pages_count[level]--;
85 direct_pages_count[level - 1] += PTRS_PER_PTE;
86}
87
88void arch_report_meminfo(struct seq_file *m)
89{
90 seq_printf(m, "DirectMap4k: %8lu kB\n",
91 direct_pages_count[PG_LEVEL_4K] << 2);
92 seq_printf(m, "DirectMap2M: %8lu kB\n",
93 direct_pages_count[PG_LEVEL_2M] << 11);
94}
95#else
96static inline void split_page_count(int level) { }
97#endif
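/*
 * Illustrative note (not part of the original file): with the 4 KiB pages
 * and 2-level tables assumed here, one Linux pmd entry maps PTRS_PER_PTE
 * (512) small pages, i.e. 2 MiB.  Splitting one section therefore moves
 * 2 MiB of direct mapping from the 2M counter to the 4K counter:
 *
 *   split_page_count(PG_LEVEL_2M);
 *     direct_pages_count[PG_LEVEL_2M] -= 1;    2 MiB less in DirectMap2M
 *     direct_pages_count[PG_LEVEL_4K] += 512;  512 * 4 KiB more in DirectMap4k
 *
 * so the totals reported by arch_report_meminfo() stay consistent.
 */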
98
99#ifdef CONFIG_DEBUG_PAGEALLOC
100# define debug_pagealloc 1
101#else
102# define debug_pagealloc 0
103#endif
104
105static inline int
106within(unsigned long addr, unsigned long start, unsigned long end)
107{
108 return addr >= start && addr < end;
109}
110
111static void cpa_flush_range(unsigned long start, int numpages, int cache)
112{
113 unsigned int i, level;
114 unsigned long addr;
115
116 BUG_ON(irqs_disabled());
117 WARN_ON(PAGE_ALIGN(start) != start);
118
119 flush_tlb_kernel_range(start, start + (numpages << PAGE_SHIFT));
120
121 if (!cache)
122 return;
123
124 for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
125 pte_t *pte = lookup_address(addr, &level);
126
127 /*
128 * Only flush present addresses:
129 */
130 if (pte && pte_present(*pte)) {
131 __cpuc_flush_dcache_area((void *) addr, PAGE_SIZE);
132 outer_flush_range(__pa((void *)addr),
133 __pa((void *)addr) + PAGE_SIZE);
134 }
135 }
136}
137
138static void cpa_flush_array(unsigned long *start, int numpages, int cache,
139 int in_flags, struct page **pages)
140{
141 unsigned int i, level;
142 bool flush_inner = true;
143 unsigned long base;
144
145 BUG_ON(irqs_disabled());
146
147 if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD &&
148 cache && in_flags & CPA_PAGES_ARRAY) {
149 inner_flush_cache_all();
150 flush_inner = false;
151 }
152
153 for (i = 0; i < numpages; i++) {
154 unsigned long addr;
155 pte_t *pte;
156
157 if (in_flags & CPA_PAGES_ARRAY)
158 addr = (unsigned long)page_address(pages[i]);
159 else
160 addr = start[i];
161
162 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
163
164 if (cache && in_flags & CPA_PAGES_ARRAY) {
165 /* cache flush all pages, including highmem pages */
166 if (flush_inner)
167 __flush_dcache_page(
168 page_mapping(pages[i]), pages[i]);
169 base = page_to_phys(pages[i]);
170 outer_flush_range(base, base + PAGE_SIZE);
171 } else if (cache) {
172 pte = lookup_address(addr, &level);
173
174 /*
175 * Only flush present addresses:
176 */
177 if (pte && pte_present(*pte)) {
178 __cpuc_flush_dcache_area((void *)addr,
179 PAGE_SIZE);
180 outer_flush_range(__pa((void *)addr),
181 __pa((void *)addr) + PAGE_SIZE);
182 }
183 }
184 }
185}
186
187/*
188 * Certain areas of memory require very specific protection flags,
189 * for example the kernel text. Callers don't always get this
190 * right, so this function checks and fixes these known,
191 * statically required protection bits.
192 */
193static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
194 unsigned long pfn)
195{
196 pgprot_t forbidden = __pgprot(0);
197
198 /*
199 * The kernel text needs to be executable for obvious reasons.
200 * This does not cover __inittext, since that is discarded later on.
201 */
202 if (within(address, (unsigned long)_text, (unsigned long)_etext))
203 pgprot_val(forbidden) |= L_PTE_XN;
204
205 /*
206 * The .rodata section needs to be read-only. Using the pfn
207 * catches all aliases.
208 */
209 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
210 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
211 prot |= L_PTE_RDONLY;
212
213 /*
214 * Mask off the forbidden bits and set the bits that are needed
215 */
216 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
217
218
219 return prot;
220}
221
222static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
223 unsigned long ext_prot)
224{
225 pgprot_t ref_prot;
226
227 ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
228
229 if (pte & L_PTE_MT_BUFFERABLE)
230 ref_prot |= PMD_SECT_BUFFERABLE;
231
232 if (pte & L_PTE_MT_WRITETHROUGH)
233 ref_prot |= PMD_SECT_CACHEABLE;
234
235 if (pte & L_PTE_SHARED)
236 ref_prot |= PMD_SECT_S;
237
238 if (pte & L_PTE_XN)
239 ref_prot |= PMD_SECT_XN;
240
241 if (pte & L_PTE_RDONLY)
242 ref_prot &= ~PMD_SECT_AP_WRITE;
243
244 ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX |
245 PTE_EXT_NG | (7 << 6))) << 6;
246
247 return ref_prot;
248}
249
250static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
251 unsigned long *ext_prot)
252{
253 pgprot_t ref_prot = 0;
254
255 ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY;
256
257 if (pmd & PMD_SECT_BUFFERABLE)
258 ref_prot |= L_PTE_MT_BUFFERABLE;
259
260 if (pmd & PMD_SECT_CACHEABLE)
261 ref_prot |= L_PTE_MT_WRITETHROUGH;
262
263 if (pmd & PMD_SECT_S)
264 ref_prot |= L_PTE_SHARED;
265
266 if (pmd & PMD_SECT_XN)
267 ref_prot |= L_PTE_XN;
268
269 if (pmd & PMD_SECT_AP_WRITE)
270 ref_prot &= ~L_PTE_RDONLY;
271
272 /* AP/APX/TEX bits */
273 *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
274 PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6;
275
276 return ref_prot;
277}
278
279/*
280 * Lookup the page table entry for a virtual address. Return a pointer
281 * to the entry and the level of the mapping.
282 *
283 * Note: We return the pmd either when the entry is marked large
284 * or when the present bit is not set. Otherwise we would return a
285 * pointer to a nonexistent mapping.
286 */
287pte_t *lookup_address(unsigned long address, unsigned int *level)
288{
289 pgd_t *pgd = pgd_offset_k(address);
290 pte_t *pte;
291 pmd_t *pmd;
292
293 /* pmds are folded into pgds on ARM */
294 *level = PG_LEVEL_NONE;
295
296 if (pgd == NULL || pgd_none(*pgd))
297 return NULL;
298
299 pmd = pmd_offset(pgd, address);
300
301 if (pmd == NULL || pmd_none(*pmd) || !pmd_present(*pmd))
302 return NULL;
303
304 if (((pmd_val(*pmd) & (PMD_TYPE_SECT | PMD_SECT_SUPER))
305 == (PMD_TYPE_SECT | PMD_SECT_SUPER)) || !pmd_present(*pmd)) {
306
307 return NULL;
308 } else if (pmd_val(*pmd) & PMD_TYPE_SECT) {
309
310 *level = PG_LEVEL_2M;
311 return (pte_t *)pmd;
312 }
313
314 pte = pte_offset_kernel(pmd, address);
315
316 if ((pte == NULL) || pte_none(*pte))
317 return NULL;
318
319 *level = PG_LEVEL_4K;
320
321 return pte;
322}
323EXPORT_SYMBOL_GPL(lookup_address);
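/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use lookup_address() to inspect the mapping of a kernel virtual address.
 * The function name and messages below are hypothetical; the block is
 * disabled so it is not part of the build.
 */
#if 0
static void example_dump_mapping(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_info("0x%08lx: not mapped\n", addr);
		return;
	}

	if (level == PG_LEVEL_2M)
		/* for sections the returned pointer is really the pmd */
		pr_info("0x%08lx: 2M section, entry 0x%08lx\n", addr,
			(unsigned long)pmd_val(*(pmd_t *)pte));
	else
		pr_info("0x%08lx: 4K page, pte 0x%08lx\n", addr,
			(unsigned long)pte_val(*pte));
}
#endif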
324
325/*
326 * Set the new pmd in all the pgds we know about:
327 */
328static void __set_pmd_pte(pmd_t *pmd, unsigned long address, pte_t *pte)
329{
330 struct page *page;
331
332 cpa_debug("__set_pmd_pte %x %x %x\n", pmd, pte, *pte);
333
334 /* change init_mm */
335 pmd_populate_kernel(&init_mm, pmd, pte);
336
337 /* change entry in all the pgd's */
338 list_for_each_entry(page, &pgd_list, lru) {
339 cpa_debug("list %x %x %x\n", (unsigned long)page,
340 (unsigned long)pgd_index(address), address);
341 pmd = pmd_offset(((pgd_t *)page_address(page)) +
342 pgd_index(address), address);
343 pmd_populate_kernel(NULL, pmd, pte);
344 }
345
346}
347
348static int
349try_preserve_large_page(pte_t *kpte, unsigned long address,
350 struct cpa_data *cpa)
351{
352 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
353 pte_t *tmp;
354 pgprot_t old_prot, new_prot, ext_prot, req_prot;
355 int i, do_split = 1;
356 unsigned int level;
357
358 if (cpa->force_split)
359 return 1;
360
361 spin_lock_irqsave(&pgd_lock, flags);
362 /*
363 * Check for races, another CPU might have split this page
364 * up already:
365 */
366 tmp = lookup_address(address, &level);
367 if (tmp != kpte)
368 goto out_unlock;
369
370 switch (level) {
371
372 case PG_LEVEL_2M:
373 psize = PMD_SIZE;
374 pmask = PMD_MASK;
375 break;
376
377 default:
378 do_split = -EINVAL;
379 goto out_unlock;
380 }
381
382 /*
383 * Calculate the number of pages, which fit into this large
384 * page starting at address:
385 */
386 nextpage_addr = (address + psize) & pmask;
387 numpages = (nextpage_addr - address) >> PAGE_SHIFT;
388 if (numpages < cpa->numpages)
389 cpa->numpages = numpages;
390
391 old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte),
392 &ext_prot);
393
394 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
395 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
396
397 /*
398 * old_pte points to the large page base address. So we need
399 * to add the offset of the virtual address:
400 */
401 pfn = pmd_pfn(*kpte) + ((address & (psize - 1)) >> PAGE_SHIFT);
402 cpa->pfn = pfn;
403
404 new_prot = static_protections(req_prot, address, pfn);
405
406 /*
407 * We need to check the full range, whether
408 * static_protection() requires a different pgprot for one of
409 * the pages in the range we try to preserve:
410 */
411 addr = address & pmask;
412 pfn = pmd_pfn(*kpte);
413 for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
414 pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
415
416 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
417 goto out_unlock;
418 }
419
420 /*
421 * If there are no changes, return. cpa->numpages has been updated
422 * above:
423 */
424 if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
425 do_split = 0;
426 goto out_unlock;
427 }
428
429 /*
430 * convert prot to pmd format
431 */
432 new_prot = pte_to_pmd_pgprot(new_prot, ext_prot);
433
434 /*
435 * We need to change the attributes. Check whether we can
436 * change the large page in one go. We request a split when
437 * the address is not aligned or the number of pages is
438 * smaller than the number of pages in the large page. Note
439 * that we already limited the number of possible pages to
440 * the number of pages in the large page.
441 */
442 if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
443 /*
444 * The address is aligned and the number of pages
445 * covers the full page.
446 */
447 phys_addr_t phys = __pfn_to_phys(pmd_pfn(*kpte));
448 pmd_t *p = (pmd_t *)kpte;
449
450 *kpte++ = __pmd(phys | new_prot);
451 *kpte = __pmd((phys + SECTION_SIZE) | new_prot);
452 flush_pmd_entry(p);
453 cpa->flags |= CPA_FLUSHTLB;
454 do_split = 0;
455 cpa_debug("preserving page at phys %x pmd %x\n", phys, p);
456 }
457
458out_unlock:
459 spin_unlock_irqrestore(&pgd_lock, flags);
460
461 return do_split;
462}
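/*
 * Worked example (not part of the original file) of the size/alignment
 * arithmetic above, assuming psize = PMD_SIZE = 2 MiB, pmask = 0xffe00000
 * and a hypothetical request starting at 0xc0310000:
 *
 *   nextpage_addr = (0xc0310000 + 0x200000) & 0xffe00000 = 0xc0400000
 *   numpages      = (0xc0400000 - 0xc0310000) >> 12      = 240
 *
 * so at most 240 pages of the request lie inside this section.  The
 * preserve path additionally requires address == nextpage_addr - psize
 * (0xc0200000 here), i.e. a request that starts on the section boundary
 * and covers the whole section; 0xc0310000 does not, so if the attributes
 * really need to change the section would be split.
 */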
463
464static int split_large_page(pte_t *kpte, unsigned long address)
465{
466 unsigned long flags, pfn, pfninc = 1;
467 unsigned int i, level;
468 pte_t *pbase, *tmp;
469 pgprot_t ref_prot = 0, ext_prot = 0;
470 int ret = 0;
471
472 pbase = pte_alloc_one_kernel(&init_mm, address);
473 if (!pbase)
474 return -ENOMEM;
475
476 cpa_debug("split_large_page %x PMD %x new pte @ %x\n", address,
477 *kpte, pbase);
478
479 spin_lock_irqsave(&pgd_lock, flags);
480 /*
481 * Check for races, another CPU might have split this page
482 * up for us already:
483 */
484 tmp = lookup_address(address, &level);
485 if (tmp != kpte)
486 goto out_unlock;
487
488 /*
489 * we only split 2MB entries for now
490 */
491 if (level != PG_LEVEL_2M) {
492 ret = -EINVAL;
493 goto out_unlock;
494 }
495
496 ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);
497
498 /*
499 * Get the target pfn from the original entry:
500 */
501 pfn = pmd_pfn(*kpte);
502 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
503 set_pte_ext(&pbase[i], pfn_pte(pfn, ref_prot), ext_prot);
504
505 if (address >= (unsigned long)__va(0) &&
506 address < (unsigned long)__va(lowmem_limit))
507 split_page_count(level);
508
509 /*
510 * Install the new, split up pagetable.
511 */
512 __set_pmd_pte((pmd_t *)kpte, address, pbase);
513
514 pbase = NULL;
515
516out_unlock:
517 /*
518 * If we dropped out via the lookup_address check under
519 * pgd_lock then stick the page back into the pool:
520 */
521 if (pbase)
522 pte_free_kernel(&init_mm, pbase);
523
524 spin_unlock_irqrestore(&pgd_lock, flags);
525
526 return ret;
527}
528
529static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
530 int primary)
531{
532 /*
533 * Ignore all non-primary paths.
534 */
535 if (!primary)
536 return 0;
537
538 /*
539 * Ignore the NULL PTE for kernel identity mapping, as it is expected
540 * to have holes.
541 * Also set numpages to '1' indicating that we processed cpa req for
542 * one virtual address page and its pfn. TBD: numpages can be set based
543 * on the initial value and the level returned by lookup_address().
544 */
545 if (within(vaddr, PAGE_OFFSET,
546 PAGE_OFFSET + lowmem_limit)) {
547 cpa->numpages = 1;
548 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
549 return 0;
550 } else {
551 WARN(1, KERN_WARNING "CPA: called for zero pte. "
552 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
553 *cpa->vaddr);
554
555 return -EFAULT;
556 }
557}
558
559static int __change_page_attr(struct cpa_data *cpa, int primary)
560{
561 unsigned long address;
562 int do_split, err;
563 unsigned int level;
564 pte_t *kpte, old_pte;
565
566 if (cpa->flags & CPA_PAGES_ARRAY) {
567 struct page *page = cpa->pages[cpa->curpage];
568
569 if (unlikely(PageHighMem(page)))
570 return 0;
571
572 address = (unsigned long)page_address(page);
573
574 } else if (cpa->flags & CPA_ARRAY)
575 address = cpa->vaddr[cpa->curpage];
576 else
577 address = *cpa->vaddr;
578
579repeat:
580 kpte = lookup_address(address, &level);
581 if (!kpte)
582 return __cpa_process_fault(cpa, address, primary);
583
584 old_pte = *kpte;
585 if (!pte_val(old_pte))
586 return __cpa_process_fault(cpa, address, primary);
587
588 if (level == PG_LEVEL_4K) {
589 pte_t new_pte;
590 pgprot_t new_prot = pte_pgprot(old_pte);
591 unsigned long pfn = pte_pfn(old_pte);
592
593 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
594 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
595
596 new_prot = static_protections(new_prot, address, pfn);
597
598 /*
599 * We need to keep the pfn from the existing PTE;
600 * after all, we're only going to change its attributes,
601 * not the memory it points to.
602 */
603 new_pte = pfn_pte(pfn, new_prot);
604 cpa->pfn = pfn;
605
606 /*
607 * Do we really change anything?
608 */
609 if (pte_val(old_pte) != pte_val(new_pte)) {
610 set_pte_ext(kpte, new_pte, 0);
611 /*
612 * FIXME: is this needed on ARM?
613 * set_pte_ext() already does a flush.
614 */
615 cpa->flags |= CPA_FLUSHTLB;
616 }
617 cpa->numpages = 1;
618 return 0;
619 }
620
621 /*
622 * Check, whether we can keep the large page intact
623 * and just change the pte:
624 */
625 do_split = try_preserve_large_page(kpte, address, cpa);
626
627 /*
628 * When the range fits into the existing large page,
629 * return. cpa->numpages and the CPA_FLUSHTLB flag have been updated
630 * in try_preserve_large_page():
631 */
632 if (do_split <= 0)
633 return do_split;
634
635 /*
636 * We have to split the large page:
637 */
638 err = split_large_page(kpte, address);
639
640 if (!err) {
641 /*
642 * Do a global flush tlb after splitting the large page
643 * and before we do the actual change page attribute in the PTE.
644 *
645 * Without this, we violate the TLB application note, which says
646 * "The TLBs may contain both ordinary and large-page
647 * translations for a 4-KByte range of linear addresses. This
648 * may occur if software modifies the paging structures so that
649 * the page size used for the address range changes. If the two
650 * translations differ with respect to page frame or attributes
651 * (e.g., permissions), processor behavior is undefined and may
652 * be implementation-specific."
653 *
654 * We do this global TLB flush inside the cpa_lock, so that we
655 * don't allow any other CPU with stale TLB entries to change the
656 * page attributes in parallel for an address that falls into the
657 * just-split large page entry.
658 */
659 flush_tlb_all();
660 goto repeat;
661 }
662
663 return err;
664}
665
666static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
667
668static int cpa_process_alias(struct cpa_data *cpa)
669{
670 struct cpa_data alias_cpa;
671 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
672 unsigned long vaddr;
673 int ret;
674
675 if (cpa->pfn >= (lowmem_limit >> PAGE_SHIFT))
676 return 0;
677
678 /*
679 * No need to redo, when the primary call touched the direct
680 * mapping already:
681 */
682 if (cpa->flags & CPA_PAGES_ARRAY) {
683 struct page *page = cpa->pages[cpa->curpage];
684 if (unlikely(PageHighMem(page)))
685 return 0;
686 vaddr = (unsigned long)page_address(page);
687 } else if (cpa->flags & CPA_ARRAY)
688 vaddr = cpa->vaddr[cpa->curpage];
689 else
690 vaddr = *cpa->vaddr;
691
692 if (!(within(vaddr, PAGE_OFFSET,
693 PAGE_OFFSET + lowmem_limit))) {
694
695 alias_cpa = *cpa;
696 alias_cpa.vaddr = &laddr;
697 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
698
699 ret = __change_page_attr_set_clr(&alias_cpa, 0);
700 if (ret)
701 return ret;
702 }
703
704 return 0;
705}
706
707static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
708{
709 int ret, numpages = cpa->numpages;
710
711 while (numpages) {
712 /*
713 * Store the remaining nr of pages for the large page
714 * preservation check.
715 */
716 cpa->numpages = numpages;
717 /* for array changes, we can't use large page */
718 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
719 cpa->numpages = 1;
720
721 if (!debug_pagealloc)
722 mutex_lock(&cpa_lock);
723 ret = __change_page_attr(cpa, checkalias);
724 if (!debug_pagealloc)
725 mutex_unlock(&cpa_lock);
726 if (ret)
727 return ret;
728
729 if (checkalias) {
730 ret = cpa_process_alias(cpa);
731 if (ret)
732 return ret;
733 }
734
735 /*
736 * Adjust the number of pages with the result of the
737 * CPA operation. Either a large page has been
738 * preserved or a single page update happened.
739 */
740 BUG_ON(cpa->numpages > numpages);
741 numpages -= cpa->numpages;
742 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
743 cpa->curpage++;
744 else
745 *cpa->vaddr += cpa->numpages * PAGE_SIZE;
746 }
747 return 0;
748}
749
750static inline int cache_attr(pgprot_t attr)
751{
752 /*
753 * We need to flush the cache for all memory type changes
754 * except when a page is being marked write-back cacheable.
755 */
756 return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK);
757}
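/*
 * Illustrative note (not part of the original file): for the memory types
 * used by the wrappers below this evaluates to, e.g.:
 *
 *   cache_attr(__pgprot(L_PTE_MT_WRITEBACK))  == 0   no cache flush needed
 *   cache_attr(__pgprot(L_PTE_MT_UNCACHED))   == 1   flush caches first
 *   cache_attr(__pgprot(L_PTE_MT_BUFFERABLE)) == 1   flush caches first
 */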
758
759static int change_page_attr_set_clr(unsigned long *addr, int numpages,
760 pgprot_t mask_set, pgprot_t mask_clr,
761 int force_split, int in_flag,
762 struct page **pages)
763{
764 struct cpa_data cpa;
765 int ret, cache, checkalias;
766 unsigned long baddr = 0;
767
768 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
769 return 0;
770
771 /* Ensure we are PAGE_SIZE aligned */
772 if (in_flag & CPA_ARRAY) {
773 int i;
774 for (i = 0; i < numpages; i++) {
775 if (addr[i] & ~PAGE_MASK) {
776 addr[i] &= PAGE_MASK;
777 WARN_ON_ONCE(1);
778 }
779 }
780 } else if (!(in_flag & CPA_PAGES_ARRAY)) {
781 /*
782 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
783 * No need to check in that case.
784 */
785 if (*addr & ~PAGE_MASK) {
786 *addr &= PAGE_MASK;
787 /*
788 * People should not be passing in unaligned addresses:
789 */
790 WARN_ON_ONCE(1);
791 }
792 /*
793 * Save address for cache flush. *addr is modified in the call
794 * to __change_page_attr_set_clr() below.
795 */
796 baddr = *addr;
797 }
798
799 /* Must avoid aliasing mappings in the highmem code */
800 kmap_flush_unused();
801
802 vm_unmap_aliases();
803
804 cpa.vaddr = addr;
805 cpa.pages = pages;
806 cpa.numpages = numpages;
807 cpa.mask_set = mask_set;
808 cpa.mask_clr = mask_clr;
809 cpa.flags = 0;
810 cpa.curpage = 0;
811 cpa.force_split = force_split;
812
813 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
814 cpa.flags |= in_flag;
815
816 /* No alias checking for XN bit modifications */
817 checkalias = (pgprot_val(mask_set) |
818 pgprot_val(mask_clr)) != L_PTE_XN;
819
820 ret = __change_page_attr_set_clr(&cpa, checkalias);
821
822 cache = cache_attr(mask_set);
823 /*
824 * Check whether we really changed something or whether
825 * the cache needs to be flushed.
826 */
827 if (!(cpa.flags & CPA_FLUSHTLB) && !cache)
828 goto out;
829
830 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
831 cpa_flush_array(addr, numpages, cache,
832 cpa.flags, pages);
833 } else
834 cpa_flush_range(baddr, numpages, cache);
835
836out:
837 return ret;
838}
839
840static inline int change_page_attr_set(unsigned long *addr, int numpages,
841 pgprot_t mask, int array)
842{
843 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
844 (array ? CPA_ARRAY : 0), NULL);
845}
846
847static inline int change_page_attr_clear(unsigned long *addr, int numpages,
848 pgprot_t mask, int array)
849{
850 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
851 (array ? CPA_ARRAY : 0), NULL);
852}
853
854static inline int cpa_set_pages_array(struct page **pages, int numpages,
855 pgprot_t mask)
856{
857 return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
858 CPA_PAGES_ARRAY, pages);
859}
860
861static inline int cpa_clear_pages_array(struct page **pages, int numpages,
862 pgprot_t mask)
863{
864 return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
865 CPA_PAGES_ARRAY, pages);
866}
867
868int set_memory_uc(unsigned long addr, int numpages)
869{
870 return change_page_attr_set_clr(&addr, numpages,
871 __pgprot(L_PTE_MT_UNCACHED),
872 __pgprot(L_PTE_MT_MASK), 0, 0, NULL);
873}
874EXPORT_SYMBOL(set_memory_uc);
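/*
 * Illustrative sketch (not part of the original file): marking a lowmem
 * buffer uncached for device-visible data and restoring write-back
 * afterwards.  Function and variable names are hypothetical; the block is
 * disabled so it is not part of the build.
 */
#if 0
static int example_uncached_buffer(void)
{
	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = set_memory_uc(buf, 4);	/* caches are flushed by the CPA code */
	if (ret)
		goto out;

	/* ... use the buffer with uncached attributes ... */

	ret = set_memory_wb(buf, 4);	/* back to normal write-back */
out:
	free_pages(buf, 2);
	return ret;
}
#endif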
875
876int _set_memory_array(unsigned long *addr, int addrinarray,
877 unsigned long set, unsigned long clr)
878{
879 return change_page_attr_set_clr(addr, addrinarray, __pgprot(set),
880 __pgprot(clr), 0, CPA_ARRAY, NULL);
881}
882
883int set_memory_array_uc(unsigned long *addr, int addrinarray)
884{
885 return _set_memory_array(addr, addrinarray,
886 L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
887}
888EXPORT_SYMBOL(set_memory_array_uc);
889
890int set_memory_array_wc(unsigned long *addr, int addrinarray)
891{
892 return _set_memory_array(addr, addrinarray,
893 L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK);
894}
895EXPORT_SYMBOL(set_memory_array_wc);
896
897int set_memory_wc(unsigned long addr, int numpages)
898{
899 int ret;
900
901 ret = change_page_attr_set_clr(&addr, numpages,
902 __pgprot(L_PTE_MT_BUFFERABLE),
903 __pgprot(L_PTE_MT_MASK),
904 0, 0, NULL);
905 return ret;
906}
907EXPORT_SYMBOL(set_memory_wc);
908
909int set_memory_wb(unsigned long addr, int numpages)
910{
911 return change_page_attr_set_clr(&addr, numpages,
912 __pgprot(L_PTE_MT_WRITEBACK),
913 __pgprot(L_PTE_MT_MASK),
914 0, 0, NULL);
915}
916EXPORT_SYMBOL(set_memory_wb);
917
918int set_memory_iwb(unsigned long addr, int numpages)
919{
920 return change_page_attr_set_clr(&addr, numpages,
921 __pgprot(L_PTE_MT_INNER_WB),
922 __pgprot(L_PTE_MT_MASK),
923 0, 0, NULL);
924}
925EXPORT_SYMBOL(set_memory_iwb);
926
927int set_memory_array_wb(unsigned long *addr, int addrinarray)
928{
929 return change_page_attr_set_clr(addr, addrinarray,
930 __pgprot(L_PTE_MT_WRITEBACK),
931 __pgprot(L_PTE_MT_MASK),
932 0, CPA_ARRAY, NULL);
933
934}
935EXPORT_SYMBOL(set_memory_array_wb);
936
937int set_memory_array_iwb(unsigned long *addr, int addrinarray)
938{
939 return change_page_attr_set_clr(addr, addrinarray,
940 __pgprot(L_PTE_MT_INNER_WB),
941 __pgprot(L_PTE_MT_MASK),
942 0, CPA_ARRAY, NULL);
943
944}
945EXPORT_SYMBOL(set_memory_array_iwb);
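/*
 * Illustrative sketch (not part of the original file): the *_array()
 * variants take a list of page-aligned kernel virtual addresses that need
 * not be contiguous.  Names below are hypothetical; the block is disabled.
 */
#if 0
static int example_scattered_uncached(unsigned long *addrs, int count)
{
	int ret = set_memory_array_uc(addrs, count);

	if (ret)
		return ret;

	/* ... hand the scattered pages to a non-coherent bus master ... */

	return set_memory_array_wb(addrs, count);
}
#endif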
946
947int set_memory_x(unsigned long addr, int numpages)
948{
949 return change_page_attr_clear(&addr, numpages,
950 __pgprot(L_PTE_XN), 0);
951}
952EXPORT_SYMBOL(set_memory_x);
953
954int set_memory_nx(unsigned long addr, int numpages)
955{
956 return change_page_attr_set(&addr, numpages,
957 __pgprot(L_PTE_XN), 0);
958}
959EXPORT_SYMBOL(set_memory_nx);
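/*
 * Illustrative sketch (not part of the original file): stripping execute
 * permission from a region and restoring it later.  Names are hypothetical;
 * numpages counts 4K pages; the block is disabled.
 */
#if 0
static int example_toggle_exec(unsigned long addr, int numpages)
{
	int ret = set_memory_nx(addr, numpages);	/* sets L_PTE_XN */

	if (ret)
		return ret;

	/* ... region is now non-executable ... */

	return set_memory_x(addr, numpages);		/* clears L_PTE_XN */
}
#endif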
960
961int set_memory_ro(unsigned long addr, int numpages)
962{
963 return change_page_attr_set(&addr, numpages,
964 __pgprot(L_PTE_RDONLY), 0);
965}
966EXPORT_SYMBOL_GPL(set_memory_ro);
967
968int set_memory_rw(unsigned long addr, int numpages)
969{
970 return change_page_attr_clear(&addr, numpages,
971 __pgprot(L_PTE_RDONLY), 0);
972}
973EXPORT_SYMBOL_GPL(set_memory_rw);
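/*
 * Illustrative sketch (not part of the original file): write-protecting a
 * page-aligned table and temporarily lifting the protection for an update.
 * Names are hypothetical; the block is disabled.
 */
#if 0
static int example_update_protected(unsigned long table, int pages,
				    void (*update)(void))
{
	int ret = set_memory_rw(table, pages);	/* clears L_PTE_RDONLY */

	if (ret)
		return ret;

	update();				/* modify the table */

	return set_memory_ro(table, pages);	/* read-only again */
}
#endif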
974
975int set_memory_np(unsigned long addr, int numpages)
976{
977 return change_page_attr_clear(&addr, numpages,
978 __pgprot(L_PTE_PRESENT), 0);
979}
980
981int set_memory_4k(unsigned long addr, int numpages)
982{
983 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
984 __pgprot(0), 1, 0, NULL);
985}
986
987static int _set_pages_array(struct page **pages, int addrinarray,
988 unsigned long set, unsigned long clr)
989{
990 return change_page_attr_set_clr(NULL, addrinarray,
991 __pgprot(set),
992 __pgprot(clr),
993 0, CPA_PAGES_ARRAY, pages);
994}
995
996int set_pages_array_uc(struct page **pages, int addrinarray)
997{
998 return _set_pages_array(pages, addrinarray,
999 L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
1000}
1001EXPORT_SYMBOL(set_pages_array_uc);
1002
1003int set_pages_array_wc(struct page **pages, int addrinarray)
1004{
1005 return _set_pages_array(pages, addrinarray, L_PTE_MT_BUFFERABLE,
1006 L_PTE_MT_MASK);
1007}
1008EXPORT_SYMBOL(set_pages_array_wc);
1009
1010int set_pages_array_wb(struct page **pages, int addrinarray)
1011{
1012 return _set_pages_array(pages, addrinarray,
1013 L_PTE_MT_WRITEBACK, L_PTE_MT_MASK);
1014}
1015EXPORT_SYMBOL(set_pages_array_wb);
1016
1017int set_pages_array_iwb(struct page **pages, int addrinarray)
1018{
1019 return _set_pages_array(pages, addrinarray,
1020 L_PTE_MT_INNER_WB, L_PTE_MT_MASK);
1021}
1022EXPORT_SYMBOL(set_pages_array_iwb);
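/*
 * Illustrative sketch (not part of the original file): the struct page
 * based variants also accept highmem pages, which __change_page_attr()
 * simply skips since they have no permanent kernel mapping.  Names are
 * hypothetical; the block is disabled.
 */
#if 0
static int example_pages_uncached(struct page **pages, int count)
{
	int ret = set_pages_array_uc(pages, count);

	if (ret)
		return ret;

	/* ... let a non-coherent device write into the pages ... */

	return set_pages_array_wb(pages, count);
}
#endif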
1023
1024#else /* CONFIG_CPA */
1025
1026void update_page_count(int level, unsigned long pages)
1027{
1028}
1029
1030static void flush_cache(struct page **pages, int numpages)
1031{
1032 unsigned int i;
1033 bool flush_inner = true;
1034 unsigned long base;
1035
1036 if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD) {
1037 inner_flush_cache_all();
1038 flush_inner = false;
1039 }
1040
1041 for (i = 0; i < numpages; i++) {
1042 if (flush_inner)
1043 __flush_dcache_page(page_mapping(pages[i]), pages[i]);
1044 base = page_to_phys(pages[i]);
1045 outer_flush_range(base, base + PAGE_SIZE);
1046 }
1047}
1048
1049int set_pages_array_uc(struct page **pages, int addrinarray)
1050{
1051 flush_cache(pages, addrinarray);
1052 return 0;
1053}
1054EXPORT_SYMBOL(set_pages_array_uc);
1055
1056int set_pages_array_wc(struct page **pages, int addrinarray)
1057{
1058 flush_cache(pages, addrinarray);
1059 return 0;
1060}
1061EXPORT_SYMBOL(set_pages_array_wc);
1062
1063int set_pages_array_wb(struct page **pages, int addrinarray)
1064{
1065 return 0;
1066}
1067EXPORT_SYMBOL(set_pages_array_wb);
1068
1069int set_pages_array_iwb(struct page **pages, int addrinarray)
1070{
1071 flush_cache(pages, addrinarray);
1072 return 0;
1073}
1074EXPORT_SYMBOL(set_pages_array_iwb);
1075
1076#endif