diff options
author | Vineet Gupta <vgupta@synopsys.com> | 2013-05-09 12:24:51 -0400 |
---|---|---|
committer | Vineet Gupta <vgupta@synopsys.com> | 2013-05-09 12:29:46 -0400 |
commit | 4102b53392d6397d80b6e09b516517efacf7ea77 (patch) | |
tree | b79aa87af1b3a1ff39b4a00b889cb806dfe32ae7 /arch | |
parent | 6ec18a81b22ab2b40df8424f2b5fc6be20ccad87 (diff) |
ARC: [mm] Aliasing VIPT dcache support 2/4
This is the meat of the series which prevents any dcache alias creation
by always keeping the U and K mapping of a page congruent.
If a mapping already exists, and another tries to access the page, the
previous one is flushed to the physical page (wback+inv)
Essentially flush_dcache_page()/copy_user_highpage() create a K-mapping
of a page, but try to defer flushing, unless a U-mapping exists.
When page is actually mapped to userspace, update_mmu_cache() flushes
the K-mapping (in certain cases this can be optimised out)
Additionally flush_cache_mm(), flush_cache_range(), flush_cache_page()
handle the purging of stale userspace mappings on exit/munmap...
flush_anon_page() handles the existing U-mapping for anon page before
kernel reads it via the GUP path.
Note that while not complete, this is enough to boot a simple
dynamically linked Busybox based rootfs
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arc/Kconfig | 4 | ||||
-rw-r--r-- | arch/arc/include/asm/cacheflush.h | 53 | ||||
-rw-r--r-- | arch/arc/include/asm/page.h | 16 | ||||
-rw-r--r-- | arch/arc/include/asm/tlb.h | 11 | ||||
-rw-r--r-- | arch/arc/mm/cache_arc700.c | 134 | ||||
-rw-r--r-- | arch/arc/mm/tlb.c | 27 |
6 files changed, 223 insertions, 22 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 491ae7923b10..5917099470ea 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES | |||
182 | Note that Global I/D ENABLE + Per Page DISABLE works but corollary | 182 | Note that Global I/D ENABLE + Per Page DISABLE works but corollary |
183 | Global DISABLE + Per Page ENABLE won't work | 183 | Global DISABLE + Per Page ENABLE won't work |
184 | 184 | ||
185 | config ARC_CACHE_VIPT_ALIASING | ||
186 | bool "Support VIPT Aliasing D$" | ||
187 | default n | ||
188 | |||
185 | endif #ARC_CACHE | 189 | endif #ARC_CACHE |
186 | 190 | ||
187 | config ARC_HAS_ICCM | 191 | config ARC_HAS_ICCM |
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h index ed820bcb745e..d692fbb17254 100644 --- a/arch/arc/include/asm/cacheflush.h +++ b/arch/arc/include/asm/cacheflush.h | |||
@@ -50,18 +50,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz); | |||
50 | #define flush_cache_vmap(start, end) flush_cache_all() | 50 | #define flush_cache_vmap(start, end) flush_cache_all() |
51 | #define flush_cache_vunmap(start, end) flush_cache_all() | 51 | #define flush_cache_vunmap(start, end) flush_cache_all() |
52 | 52 | ||
53 | /* | 53 | #define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ |
54 | * VM callbacks when entire/range of user-space V-P mappings are | 54 | |
55 | * torn-down/get-invalidated | 55 | #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING |
56 | * | 56 | |
57 | * Currently we don't support D$ aliasing configs for our VIPT caches | ||
58 | * NOPS for VIPT Cache with non-aliasing D$ configurations only | ||
59 | */ | ||
60 | #define flush_cache_dup_mm(mm) /* called on fork */ | ||
61 | #define flush_cache_mm(mm) /* called on munmap/exit */ | 57 | #define flush_cache_mm(mm) /* called on munmap/exit */ |
62 | #define flush_cache_range(mm, u_vstart, u_vend) | 58 | #define flush_cache_range(mm, u_vstart, u_vend) |
63 | #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ | 59 | #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ |
64 | 60 | ||
61 | #else /* VIPT aliasing dcache */ | ||
62 | |||
63 | /* To clear out stale userspace mappings */ | ||
64 | void flush_cache_mm(struct mm_struct *mm); | ||
65 | void flush_cache_range(struct vm_area_struct *vma, | ||
66 | unsigned long start,unsigned long end); | ||
67 | void flush_cache_page(struct vm_area_struct *vma, | ||
68 | unsigned long user_addr, unsigned long page); | ||
69 | |||
70 | /* | ||
71 | * To make sure that userspace mapping is flushed to memory before | ||
72 | * get_user_pages() uses a kernel mapping to access the page | ||
73 | */ | ||
74 | #define ARCH_HAS_FLUSH_ANON_PAGE | ||
75 | void flush_anon_page(struct vm_area_struct *vma, | ||
76 | struct page *page, unsigned long u_vaddr); | ||
77 | |||
78 | #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ | ||
79 | |||
80 | /* | ||
81 | * Simple wrapper over config option | ||
82 | * Bootup code ensures that hardware matches kernel configuration | ||
83 | */ | ||
84 | static inline int cache_is_vipt_aliasing(void) | ||
85 | { | ||
86 | #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING | ||
87 | return 1; | ||
88 | #else | ||
89 | return 0; | ||
90 | #endif | ||
91 | } | ||
92 | |||
93 | #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) | ||
94 | |||
95 | /* | ||
96 | * checks if two addresses (after page aligning) index into same cache set | ||
97 | */ | ||
98 | #define addr_not_cache_congruent(addr1, addr2) \ | ||
99 | cache_is_vipt_aliasing() ? \ | ||
100 | (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ | ||
101 | |||
65 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 102 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
66 | do { \ | 103 | do { \ |
67 | memcpy(dst, src, len); \ | 104 | memcpy(dst, src, len); \ |
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index bdf546104551..374a35514116 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h | |||
@@ -16,13 +16,27 @@ | |||
16 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | 16 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) |
17 | #define free_user_page(page, addr) free_page(addr) | 17 | #define free_user_page(page, addr) free_page(addr) |
18 | 18 | ||
19 | /* TBD: for now don't worry about VIPT D$ aliasing */ | ||
20 | #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) | 19 | #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) |
21 | #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) | 20 | #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) |
22 | 21 | ||
22 | #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING | ||
23 | |||
23 | #define clear_user_page(addr, vaddr, pg) clear_page(addr) | 24 | #define clear_user_page(addr, vaddr, pg) clear_page(addr) |
24 | #define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) | 25 | #define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) |
25 | 26 | ||
27 | #else /* VIPT aliasing dcache */ | ||
28 | |||
29 | struct vm_area_struct; | ||
30 | struct page; | ||
31 | |||
32 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE | ||
33 | |||
34 | void copy_user_highpage(struct page *to, struct page *from, | ||
35 | unsigned long u_vaddr, struct vm_area_struct *vma); | ||
36 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); | ||
37 | |||
38 | #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ | ||
39 | |||
26 | #undef STRICT_MM_TYPECHECKS | 40 | #undef STRICT_MM_TYPECHECKS |
27 | 41 | ||
28 | #ifdef STRICT_MM_TYPECHECKS | 42 | #ifdef STRICT_MM_TYPECHECKS |
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h index fe91719866a5..85b6df839bd7 100644 --- a/arch/arc/include/asm/tlb.h +++ b/arch/arc/include/asm/tlb.h | |||
@@ -30,13 +30,20 @@ do { \ | |||
30 | /* | 30 | /* |
31 | * This pair is called at time of munmap/exit to flush cache and TLB entries | 31 | * This pair is called at time of munmap/exit to flush cache and TLB entries |
32 | * for mappings being torn down. | 32 | * for mappings being torn down. |
33 | * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) | 33 | * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$ |
34 | * as we don't support aliasing configs in our VIPT D$. | ||
35 | * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range | 34 | * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range |
36 | * | 35 | * |
37 | * Note, read http://lkml.org/lkml/2004/1/15/6 | 36 | * Note, read http://lkml.org/lkml/2004/1/15/6 |
38 | */ | 37 | */ |
38 | #ifndef CONFIG_ARC_CACHE_VIPT_ALIASING | ||
39 | #define tlb_start_vma(tlb, vma) | 39 | #define tlb_start_vma(tlb, vma) |
40 | #else | ||
41 | #define tlb_start_vma(tlb, vma) \ | ||
42 | do { \ | ||
43 | if (!tlb->fullmm) \ | ||
44 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ | ||
45 | } while(0) | ||
46 | #endif | ||
40 | 47 | ||
41 | #define tlb_end_vma(tlb, vma) \ | 48 | #define tlb_end_vma(tlb, vma) \ |
42 | do { \ | 49 | do { \ |
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index a9a37089257a..9887195379ef 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c | |||
@@ -68,6 +68,7 @@ | |||
68 | #include <linux/mmu_context.h> | 68 | #include <linux/mmu_context.h> |
69 | #include <linux/syscalls.h> | 69 | #include <linux/syscalls.h> |
70 | #include <linux/uaccess.h> | 70 | #include <linux/uaccess.h> |
71 | #include <linux/pagemap.h> | ||
71 | #include <asm/cacheflush.h> | 72 | #include <asm/cacheflush.h> |
72 | #include <asm/cachectl.h> | 73 | #include <asm/cachectl.h> |
73 | #include <asm/setup.h> | 74 | #include <asm/setup.h> |
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void) | |||
138 | struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; | 139 | struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; |
139 | struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; | 140 | struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; |
140 | int way_pg_ratio = way_pg_ratio; | 141 | int way_pg_ratio = way_pg_ratio; |
142 | int dcache_does_alias; | ||
141 | char str[256]; | 143 | char str[256]; |
142 | 144 | ||
143 | printk(arc_cache_mumbojumbo(0, str, sizeof(str))); | 145 | printk(arc_cache_mumbojumbo(0, str, sizeof(str))); |
@@ -184,9 +186,13 @@ chk_dc: | |||
184 | panic("Cache H/W doesn't match kernel Config"); | 186 | panic("Cache H/W doesn't match kernel Config"); |
185 | } | 187 | } |
186 | 188 | ||
189 | dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; | ||
190 | |||
187 | /* check for D-Cache aliasing */ | 191 | /* check for D-Cache aliasing */ |
188 | if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE) | 192 | if (dcache_does_alias && !cache_is_vipt_aliasing()) |
189 | panic("D$ aliasing not handled right now\n"); | 193 | panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); |
194 | else if (!dcache_does_alias && cache_is_vipt_aliasing()) | ||
195 | panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); | ||
190 | #endif | 196 | #endif |
191 | 197 | ||
192 | /* Set the default Invalidate Mode to "simpy discard dirty lines" | 198 | /* Set the default Invalidate Mode to "simpy discard dirty lines" |
@@ -312,7 +318,7 @@ static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, | |||
312 | } | 318 | } |
313 | } | 319 | } |
314 | 320 | ||
315 | /* For kernel mappings cache op index is same as paddr */ | 321 | /* For kernel mappings cache operation: index is same as paddr */ |
316 | #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) | 322 | #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) |
317 | 323 | ||
318 | /* | 324 | /* |
@@ -464,10 +470,47 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, | |||
464 | * Exported APIs | 470 | * Exported APIs |
465 | */ | 471 | */ |
466 | 472 | ||
473 | /* | ||
474 | * Handle cache congruency of kernel and userspace mappings of page when kernel | ||
475 | * writes-to/reads-from | ||
476 | * | ||
477 | * The idea is to defer flushing of kernel mapping after a WRITE, possible if: | ||
478 | * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent | ||
479 | * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache) | ||
480 | * -In SMP, if hardware caches are coherent | ||
481 | * | ||
482 | * There's a corollary case, where kernel READs from a userspace mapped page. | ||
483 | * If the U-mapping is not congruent to to K-mapping, former needs flushing. | ||
484 | */ | ||
467 | void flush_dcache_page(struct page *page) | 485 | void flush_dcache_page(struct page *page) |
468 | { | 486 | { |
469 | /* Make a note that dcache is not yet flushed for this page */ | 487 | struct address_space *mapping; |
470 | set_bit(PG_arch_1, &page->flags); | 488 | |
489 | if (!cache_is_vipt_aliasing()) { | ||
490 | set_bit(PG_arch_1, &page->flags); | ||
491 | return; | ||
492 | } | ||
493 | |||
494 | /* don't handle anon pages here */ | ||
495 | mapping = page_mapping(page); | ||
496 | if (!mapping) | ||
497 | return; | ||
498 | |||
499 | /* | ||
500 | * pagecache page, file not yet mapped to userspace | ||
501 | * Make a note that K-mapping is dirty | ||
502 | */ | ||
503 | if (!mapping_mapped(mapping)) { | ||
504 | set_bit(PG_arch_1, &page->flags); | ||
505 | } else if (page_mapped(page)) { | ||
506 | |||
507 | /* kernel reading from page with U-mapping */ | ||
508 | void *paddr = page_address(page); | ||
509 | unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; | ||
510 | |||
511 | if (addr_not_cache_congruent(paddr, vaddr)) | ||
512 | __flush_dcache_page(paddr, vaddr); | ||
513 | } | ||
471 | } | 514 | } |
472 | EXPORT_SYMBOL(flush_dcache_page); | 515 | EXPORT_SYMBOL(flush_dcache_page); |
473 | 516 | ||
@@ -612,6 +655,87 @@ noinline void flush_cache_all(void) | |||
612 | 655 | ||
613 | } | 656 | } |
614 | 657 | ||
658 | #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING | ||
659 | |||
660 | void flush_cache_mm(struct mm_struct *mm) | ||
661 | { | ||
662 | flush_cache_all(); | ||
663 | } | ||
664 | |||
665 | void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, | ||
666 | unsigned long pfn) | ||
667 | { | ||
668 | unsigned int paddr = pfn << PAGE_SHIFT; | ||
669 | |||
670 | __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); | ||
671 | } | ||
672 | |||
673 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
674 | unsigned long end) | ||
675 | { | ||
676 | flush_cache_all(); | ||
677 | } | ||
678 | |||
679 | void copy_user_highpage(struct page *to, struct page *from, | ||
680 | unsigned long u_vaddr, struct vm_area_struct *vma) | ||
681 | { | ||
682 | void *kfrom = page_address(from); | ||
683 | void *kto = page_address(to); | ||
684 | int clean_src_k_mappings = 0; | ||
685 | |||
686 | /* | ||
687 | * If SRC page was already mapped in userspace AND it's U-mapping is | ||
688 | * not congruent with K-mapping, sync former to physical page so that | ||
689 | * K-mapping in memcpy below, sees the right data | ||
690 | * | ||
691 | * Note that while @u_vaddr refers to DST page's userspace vaddr, it is | ||
692 | * equally valid for SRC page as well | ||
693 | */ | ||
694 | if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { | ||
695 | __flush_dcache_page(kfrom, u_vaddr); | ||
696 | clean_src_k_mappings = 1; | ||
697 | } | ||
698 | |||
699 | copy_page(kto, kfrom); | ||
700 | |||
701 | /* | ||
702 | * Mark DST page K-mapping as dirty for a later finalization by | ||
703 | * update_mmu_cache(). Although the finalization could have been done | ||
704 | * here as well (given that both vaddr/paddr are available). | ||
705 | * But update_mmu_cache() already has code to do that for other | ||
706 | * non copied user pages (e.g. read faults which wire in pagecache page | ||
707 | * directly). | ||
708 | */ | ||
709 | set_bit(PG_arch_1, &to->flags); | ||
710 | |||
711 | /* | ||
712 | * if SRC was already usermapped and non-congruent to kernel mapping | ||
713 | * sync the kernel mapping back to physical page | ||
714 | */ | ||
715 | if (clean_src_k_mappings) { | ||
716 | __flush_dcache_page(kfrom, kfrom); | ||
717 | } else { | ||
718 | set_bit(PG_arch_1, &from->flags); | ||
719 | } | ||
720 | } | ||
721 | |||
722 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) | ||
723 | { | ||
724 | clear_page(to); | ||
725 | set_bit(PG_arch_1, &page->flags); | ||
726 | } | ||
727 | |||
728 | void flush_anon_page(struct vm_area_struct *vma, struct page *page, | ||
729 | unsigned long u_vaddr) | ||
730 | { | ||
731 | /* TBD: do we really need to clear the kernel mapping */ | ||
732 | __flush_dcache_page(page_address(page), u_vaddr); | ||
733 | __flush_dcache_page(page_address(page), page_address(page)); | ||
734 | |||
735 | } | ||
736 | |||
737 | #endif | ||
738 | |||
615 | /********************************************************************** | 739 | /********************************************************************** |
616 | * Explicit Cache flush request from user space via syscall | 740 | * Explicit Cache flush request from user space via syscall |
617 | * Needed for JITs which generate code on the fly | 741 | * Needed for JITs which generate code on the fly |
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 5810c7a92b77..066145b5f348 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c | |||
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | |||
421 | /* | 421 | /* |
422 | * Called at the end of pagefault, for a userspace mapped page | 422 | * Called at the end of pagefault, for a userspace mapped page |
423 | * -pre-install the corresponding TLB entry into MMU | 423 | * -pre-install the corresponding TLB entry into MMU |
424 | * -Finalize the delayed D-cache flush (wback+inv kernel mapping) | 424 | * -Finalize the delayed D-cache flush of kernel mapping of page due to |
425 | * flush_dcache_page(), copy_user_page() | ||
426 | * | ||
427 | * Note that flush (when done) involves both WBACK - so physical page is | ||
428 | * in sync as well as INV - so any non-congruent aliases don't remain | ||
425 | */ | 429 | */ |
426 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, | 430 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, |
427 | pte_t *ptep) | 431 | pte_t *ptep) |
428 | { | 432 | { |
429 | unsigned long vaddr = vaddr_unaligned & PAGE_MASK; | 433 | unsigned long vaddr = vaddr_unaligned & PAGE_MASK; |
434 | unsigned long paddr = pte_val(*ptep) & PAGE_MASK; | ||
430 | 435 | ||
431 | create_tlb(vma, vaddr, ptep); | 436 | create_tlb(vma, vaddr, ptep); |
432 | 437 | ||
433 | /* icache doesn't snoop dcache, thus needs to be made coherent here */ | 438 | /* |
434 | if (vma->vm_flags & VM_EXEC) { | 439 | * Exec page : Independent of aliasing/page-color considerations, |
440 | * since icache doesn't snoop dcache on ARC, any dirty | ||
441 | * K-mapping of a code page needs to be wback+inv so that | ||
442 | * icache fetch by userspace sees code correctly. | ||
443 | * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it | ||
444 | * so userspace sees the right data. | ||
445 | * (Avoids the flush for Non-exec + congruent mapping case) | ||
446 | */ | ||
447 | if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { | ||
435 | struct page *page = pfn_to_page(pte_pfn(*ptep)); | 448 | struct page *page = pfn_to_page(pte_pfn(*ptep)); |
436 | 449 | ||
437 | /* if page was dcache dirty, flush now */ | ||
438 | int dirty = test_and_clear_bit(PG_arch_1, &page->flags); | 450 | int dirty = test_and_clear_bit(PG_arch_1, &page->flags); |
439 | if (dirty) { | 451 | if (dirty) { |
440 | unsigned long paddr = pte_val(*ptep) & PAGE_MASK; | 452 | /* wback + inv dcache lines */ |
441 | __flush_dcache_page(paddr, paddr); | 453 | __flush_dcache_page(paddr, paddr); |
442 | __inv_icache_page(paddr, vaddr); | 454 | |
455 | /* invalidate any existing icache lines */ | ||
456 | if (vma->vm_flags & VM_EXEC) | ||
457 | __inv_icache_page(paddr, vaddr); | ||
443 | } | 458 | } |
444 | } | 459 | } |
445 | } | 460 | } |