| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
|---|---|---|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
| commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch) | |
| tree | 644b88f8a71896307d71438e9b3af49126ffb22b /arch/arm/mm/flush.c | |
| parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff) | |
| parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff) | |
Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'arch/arm/mm/flush.c')
| -rw-r--r-- | arch/arm/mm/flush.c | 109 |
1 file changed, 79 insertions, 30 deletions
```diff
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,8 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
+#include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -35,14 +37,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
-	__flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
@@ -52,16 +52,13 @@ void flush_cache_mm(struct mm_struct *mm)
		    :
		    : "r" (0)
		    : "cc");
-		__flush_icache_all();
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
```
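The VIVT branches above lose their open-coded cpumask tests; that logic moves into `vivt_flush_cache_mm()`, `vivt_flush_cache_range()` and (in the next hunk) `vivt_flush_cache_page()` helpers. Reconstructed from the lines deleted here, the helpers in `<asm/cacheflush.h>` presumably look something like this sketch:

```c
/*
 * Sketch of the vivt_flush_cache_*() helpers this patch calls,
 * reconstructed from the open-coded logic removed above. The real
 * definitions live in arch/arm/include/asm/cacheflush.h; treat the
 * exact form here as an assumption.
 */
#define vivt_flush_cache_mm(mm)						\
	do {								\
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) \
			__cpuc_flush_user_all();			\
	} while (0)

#define vivt_flush_cache_range(vma, start, end)				\
	do {								\
		if (cpumask_test_cpu(smp_processor_id(),		\
				     mm_cpumask((vma)->vm_mm)))		\
			__cpuc_flush_user_range((start) & PAGE_MASK,	\
						PAGE_ALIGN(end),	\
						(vma)->vm_flags);	\
	} while (0)
```

Factoring the helpers out keeps the VIVT fast path in one place instead of three.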
```diff
@@ -71,27 +68,41 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
		    :
		    : "r" (0)
		    : "cc");
-		__flush_icache_all();
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
+#ifdef CONFIG_SMP
+static void flush_ptrace_access_other(void *args)
+{
+	__flush_icache_all();
+}
+#endif
+
+static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
+			 unsigned long uaddr, void *kaddr, unsigned long len)
 {
 	if (cache_is_vivt()) {
 		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
```
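Deleting `__flush_icache_all()` from `flush_pfn_alias()` and the VIPT-aliasing branches is not a behaviour regression: the I-cache invalidate moves to the callers, where it can be skipped for data-only mappings and issued only when `VM_EXEC` is set (or, in `flush_cache_page()`, when the I-cache is VIVT ASID-tagged and stale lines could outlive a context switch). For reference, `__flush_icache_all()` is a single CP15 write; a minimal sketch, assuming the 2.6.33-era `<asm/cacheflush.h>` definition:

```c
/*
 * Minimal sketch (assumption, matching the era's <asm/cacheflush.h>):
 * invalidate the entire I-cache with one write to CP15 c7, c5, 0.
 * Cheap, but still pointless for non-executable mappings -- hence the
 * new VM_EXEC gating above.
 */
static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"
	    :
	    : "r" (0));
}
```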
```diff
@@ -103,20 +114,42 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
-	    vma->vm_flags & VM_EXEC) {
+	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
-		/* only flushing the kernel mapping on non-aliasing VIPT */
 		__cpuc_coherent_kern_range(addr, addr + len);
+#ifdef CONFIG_SMP
+		if (cache_ops_need_broadcast())
+			smp_call_function(flush_ptrace_access_other,
+					  NULL, 1);
+#endif
 	}
 }
-#else
-#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long uaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
+#endif
+	memcpy(dst, src, len);
+	flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+	preempt_enable();
 #endif
+}
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
```
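`copy_to_user_page()` gains an ARM-specific definition here because `flush_ptrace_access()` may now broadcast the I-cache invalidate via `smp_call_function()`; the `preempt_disable()`/`preempt_enable()` pair keeps the task from migrating between the `memcpy()` and the flush. A hedged sketch of the typical caller, modelled on the generic `access_process_vm()` path in `mm/memory.c` (the function and local names are illustrative, not from this patch):

```c
#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Sketch (assumption, modelled on access_process_vm() in mm/memory.c):
 * how a ptrace write into another process's page reaches
 * copy_to_user_page(). 'offset' is the byte offset within the page.
 */
static void poke_target_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, const void *buf,
			     int offset, int bytes)
{
	void *maddr = kmap(page);	/* kernel alias of the target page */

	/* memcpy into the kernel alias, then fix caches up for 'vma' */
	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
	set_page_dirty_lock(page);
	kunmap(page);
}
```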
```diff
@@ -125,14 +158,20 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(page))
-#endif
-		__cpuc_flush_dcache_page(page_address(page));
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
```
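The new highmem branch deals with the fact that a highmem page only has a kernel virtual address while it is kmapped: `kmap_high_get()` reuses an existing pinned pkmap mapping if there is one, and only when that fails on a VIPT cache does `kmap_high_l1_vipt()` build a temporary mapping. The L1-VIPT variant matters because of cache colour: on an aliasing VIPT D-cache, flushing through a kernel address only touches the user mapping's lines if both addresses index the same sets. A sketch of the colour arithmetic, assuming the usual ARM `<asm/shmparam.h>` definition:

```c
/*
 * Sketch (assumption, mirroring <asm/shmparam.h>): two virtual addresses
 * alias in the same VIPT D-cache sets only if they share a "colour",
 * i.e. the index bits between PAGE_SHIFT and the aliasing boundary
 * (SHMLBA). same_cache_colour() is a hypothetical helper for illustration.
 */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

static inline int same_cache_colour(unsigned long va1, unsigned long va2)
{
	return CACHE_COLOUR(va1) == CACHE_COLOUR(va2);
}
```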
```diff
@@ -196,7 +235,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
```
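The early return is a cheap filter for a very hot case: every read fault on an untouched anonymous page maps the single global zero page read-only, so `flush_dcache_page()` sees it constantly, yet it can never hold dirty lines. Expressed as a predicate (a hypothetical helper, for illustration only):

```c
#include <linux/mm.h>

/*
 * Hypothetical predicate (illustration only, not part of the patch):
 * the zero page is mapped read-only everywhere, so it never carries
 * dirty D-cache lines and never needs flushing.
 */
static inline bool page_may_have_dirty_dcache(struct page *page)
{
	return page != ZERO_PAGE(0);
}
```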
```diff
@@ -242,6 +290,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * userspace address only.
 	 */
 	flush_pfn_alias(pfn, vmaddr);
+	__flush_icache_all();
 }
 
 /*
@@ -249,5 +298,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * in this mapping of the page.  FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
```
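The final two hunks track the `cpu_cache_fns` interface change from a fixed page-sized flush, `__cpuc_flush_dcache_page(addr)`, to the length-parameterised `__cpuc_flush_dcache_area(addr, size)`. For callers that flushed exactly one page the translation is mechanical; a hypothetical compatibility macro (not in the tree) makes the equivalence explicit:

```c
/*
 * Hypothetical compatibility macro (illustration only): the old
 * page-granular flush expressed through the new area-based hook.
 */
#define __cpuc_flush_dcache_page(addr)	\
	__cpuc_flush_dcache_area((addr), PAGE_SIZE)
```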
