Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
 -rw-r--r--  arch/arm/include/asm/cacheflush.h  537
 1 file changed, 537 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
new file mode 100644
index 000000000000..9073d9c6567e
--- /dev/null
+++ b/arch/arm/include/asm/cacheflush.h
@@ -0,0 +1,537 @@
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

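/*
 * Worked example (illustrative; the actual values depend on the
 * platform's SHMLBA and PAGE_SHIFT): with 4K pages and
 * SHMLBA == 4 * PAGE_SIZE, CACHE_COLOUR() extracts address bits 13:12,
 * giving colours 0..3:
 *
 *	CACHE_COLOUR(0x40001000) == 1
 *	CACHE_COLOUR(0x40005000) == 1	(same colour as above)
 *	CACHE_COLOUR(0x40002000) == 2	(different colour)
 *
 * On an aliasing VIPT cache, two virtual mappings of the same page are
 * only guaranteed to hit the same cache lines if their colours match.
 */
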
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V7)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

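/*
 * Example of how the selection above resolves (illustrative): a kernel
 * configured for CONFIG_CPU_ARM926T alone ends up with _CACHE defined
 * as arm926 and MULTI_CACHE unset, so cache calls bind directly.
 * Enable a second model as well, say CONFIG_CPU_XSCALE, and the second
 * matching block finds _CACHE already defined and sets MULTI_CACHE,
 * routing every call through the cpu_cache function table below.
 */
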
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		Must not write back entries, except that lines which are
 *		only partially covered because 'start' or 'end' is not
 *		cache line aligned must be written back first.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

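/*
 * Illustrative expansion of the glue above: with _CACHE defined as
 * arm926, the preprocessor turns
 *
 *	__cpuc_flush_kern_all()
 *
 * into a direct call to
 *
 *	arm926_flush_kern_cache_all()
 *
 * (implemented in arch/arm/mm/proc-arm926.S), so single-cache kernels
 * pay no indirection compared with the MULTI_CACHE function table.
 */
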
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
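
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * making a buffer visible to a DMA device means cleaning the inner
 * cache first, then the outer cache, the latter by physical address:
 *
 *	static inline void example_clean_for_device(void *buf, size_t size)
 *	{
 *		dmac_clean_range(buf, buf + size);
 *		outer_clean_range(__pa(buf), __pa(buf) + size);
 *	}
 *
 * The dma-mapping API performs this sequence internally; drivers
 * should use dma_map_single() and friends rather than open-coding it.
 */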

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap etc.) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
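
/*
 * Illustrative use (a sketch of the access_process_vm() write path;
 * the real code lives in mm/memory.c):
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, kaddr + offset, buf, len);
 *	kunmap(page);
 *
 * The flush_ptrace_access() call hidden inside the macro is what keeps
 * the target task's view of the page coherent after the write.
 */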

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			unsigned long uaddr, void *kaddr,
			unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
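
/*
 * Illustrative call, roughly as made from the sys_cacheflush handler
 * once the vma covering [start, end) has been looked up (a sketch,
 * not the exact kernel code):
 *
 *	flush_cache_user_range(vma, start, end);
 */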

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
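
/*
 * Illustrative sketch (hypothetical 'dst'/'insns'/'len'): any code that
 * writes instructions to memory - module loading, kprobes and the
 * like - must make the range coherent before jumping to it:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */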

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);
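
/*
 * Illustrative caller (a sketch, assuming 'data'/'offset'/'len'): code
 * that writes to a page cache page through the kernel mapping must
 * report it afterwards, e.g.:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(kaddr + offset, data, len);
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_dcache_page(page);
 */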

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;
	dmac_inv_range(start, start + size);
}

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))

#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)

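/*
 * Worked example of the decoding above: on pre-v7 CPUs, bits 28:25 of
 * the cache type register hold the cache class; class 14 (0b1110)
 * denotes a VIPT data cache, which is exactly what
 * (val & (15 << 25)) == (14 << 25) tests.  Bit 23 then separates
 * aliasing VIPT (set) from non-aliasing VIPT (clear).  A v7-format
 * register, recognised by bits 31:29 == 4, is treated by these macros
 * as always VIPT non-aliasing, with bits 15:14 == 1 marking an
 * ASID-tagged VIVT I-cache.
 */
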
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIVT caches only
 */
#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0
#define icache_is_vivt_asid_tagged()	0

#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIPT caches only
 */
#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vivt_asid_tagged_instr(__val);	\
	})

#else
/*
 * VIVT or VIPT caches.  Note that this is unreliable since ARM926
 * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
 * There's no way to tell from the CacheType register what type (!)
 * the cache is.
 */
#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_aliasing(__val);			\
	})

#define icache_is_vivt_asid_tagged()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vivt_asid_tagged_instr(__val);	\
	})

#endif

#endif
