Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--	arch/arm/include/asm/cacheflush.h	75
1 file changed, 75 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non-cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path.  It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
+ * operation, is needed to avoid discarding any concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located in
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6	/* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area, but we use the name anyway
+ * for code-intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU.  We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
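
Usage sketch (not part of the patch): the intended pattern is that each state
variable shared between cached and non-cached observers sits in its own
writeback granule, that a cached writer publishes with sync_cache_w(), and
that a cached reader refreshes with sync_cache_r() before reading.  The
struct, variable, and function names below are hypothetical illustrations,
not anything introduced by this patch:

	#include <linux/compiler.h>
	#include <asm/cacheflush.h>

	/*
	 * Each member is placed in its own writeback granule, so a cached
	 * write to one cannot evict or corrupt a concurrent non-cached
	 * write to the other.
	 */
	struct mixed_state {
		int cpu_up __aligned(__CACHE_WRITEBACK_GRANULE);
		int cluster_up __aligned(__CACHE_WRITEBACK_GRANULE);
	};

	static struct mixed_state state;

	/* Cached writer: any cached write must be followed by a clean. */
	static void publish_cpu_up(int up)
	{
		state.cpu_up = up;
		sync_cache_w(&state.cpu_up);
	}

	/*
	 * Cached reader: any cached read must be preceded by a flush
	 * (clean+invalidate), which sync_cache_r() provides.
	 */
	static int read_cpu_up(void)
	{
		sync_cache_r(&state.cpu_up);
		return state.cpu_up;
	}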
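
The range helpers can also be called directly when more than a single
variable must be synchronized.  A hypothetical sketch, publishing a table of
entry addresses to a CPU coming up with its MMU and caches still off
(entry_vectors and set_entry_vector are illustrative names, not kernel API):

	static unsigned long entry_vectors[4]
		__aligned(__CACHE_WRITEBACK_GRANULE);

	static void set_entry_vector(unsigned int cpu, unsigned long addr)
	{
		entry_vectors[cpu] = addr;
		/*
		 * Push the update past both the inner and outer caches so
		 * the non-cached observer sees it in main memory.
		 */
		__sync_cache_range_w(entry_vectors, sizeof(entry_vectors));
	}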