Diffstat (limited to 'arch/arc/mm/cache.c')
-rw-r--r--  arch/arc/mm/cache.c | 36 +++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 25c631942500..f2701c13a66b 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
 	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
 		       perip_base,
-		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
 	return buf;
 }
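Note: the hunk above only tweaks the boot-log string, but its context lines show the incremental buffer-append idiom arc_cache_mumbojumbo() relies on. A minimal userspace sketch of that idiom, assuming nothing about IS_AVAIL3()'s expansion and emulating the kernel's scnprintf() with snprintf():

#include <stdio.h>

int main(void)
{
	char buf[64];
	int n = 0, len = sizeof(buf);

	/*
	 * Each writer appends at buf + n, bounded by the space left.
	 * Caveat: the kernel's scnprintf() returns bytes actually
	 * written (so n can never exceed len); plain snprintf()
	 * returns the would-be length, which is only safe here
	 * because the buffer is large enough.
	 */
	n += snprintf(buf + n, len - n, "Peripherals\t: %#lx", 0xf0000000UL);
	n += snprintf(buf + n, len - n, "%s", ", IO-Coherency (per-device) ");
	puts(buf);
	return 0;
}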
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 }
 
 /*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
-/*
  * Exported DMA API
  */
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
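Note: the deleted comment explains why these callbacks could be empty bodies: when the IOC snoops all DMA traffic, no per-buffer cache maintenance is needed. A hedged sketch of that no-op dispatch pattern (hypothetical names, not kernel code):

#include <stdio.h>

typedef unsigned long phys_addr_t;

/* Hardware keeps caches coherent: nothing to do per buffer. */
static void wback_inv_noop(phys_addr_t start, unsigned long sz) { }

/* Non-coherent path: real writeback + invalidate would go here. */
static void wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	printf("SLC writeback+invalidate %#lx..%#lx\n", start, start + sz);
}

static void (*dma_wback_inv)(phys_addr_t, unsigned long);

int main(void)
{
	int hw_coherent = 1;	/* pretend an IOC snoops DMA traffic */

	dma_wback_inv = hw_coherent ? wback_inv_noop : wback_inv_slc;
	dma_wback_inv(0x80000000UL, 4096);	/* no-op when coherent */
	return 0;
}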
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
 {
 	unsigned int ioc_base, mem_sz;
 
+	/*
+	 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+	 * simultaneously. This happens because as of today IOC aperture covers
+	 * only ZONE_NORMAL (low mem) and any dma transactions outside this
+	 * region won't be HW coherent.
+	 * If we want to use both IOC and ZONE_HIGHMEM we can use
+	 * bounce_buffer to handle dma transactions to HIGHMEM.
+	 * Also it is possible to modify dma_direct cache ops or increase IOC
+	 * aperture size if we are planning to use HIGHMEM without PAE.
+	 */
+	if (IS_ENABLED(CONFIG_HIGHMEM))
+		panic("IOC and HIGHMEM can't be used simultaneously");
+
 	/* Flush + invalidate + disable L1 dcache */
 	__dc_disable();
 
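Note: the new comment's argument can be made concrete. The IOC aperture is programmed over low memory only, so a physical address in ZONE_HIGHMEM falls outside it and DMA to it is not snooped. A sketch with example values (the real aperture base and size come from register programming elsewhere in arc_ioc_setup(); these numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long phys_addr_t;

/* True iff paddr lies inside the snooped window [ap_base, ap_base + ap_sz). */
static bool ioc_covers(phys_addr_t paddr, phys_addr_t ap_base,
		       unsigned long long ap_sz)
{
	return paddr >= ap_base && paddr < ap_base + ap_sz;
}

int main(void)
{
	phys_addr_t ap_base = 0x80000000ULL;		/* example: start of low memory */
	unsigned long long ap_sz = 0x20000000ULL;	/* example: 512 MiB aperture */

	printf("lowmem page:  %s\n",
	       ioc_covers(0x90000000ULL, ap_base, ap_sz) ? "coherent" : "NOT coherent");
	printf("highmem page: %s\n",
	       ioc_covers(0xb0000000ULL, ap_base, ap_sz) ? "coherent" : "NOT coherent");
	return 0;
}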
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
 	if (is_isa_arcv2() && ioc_enable)
 		arc_ioc_setup();
 
-	if (is_isa_arcv2() && ioc_enable) {
-		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-		__dma_cache_inv = __dma_cache_inv_ioc;
-		__dma_cache_wback = __dma_cache_wback_ioc;
-	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
 		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
 		__dma_cache_inv = __dma_cache_inv_slc;
 		__dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
 		__dma_cache_inv = __dma_cache_inv_l1;
 		__dma_cache_wback = __dma_cache_wback_l1;
 	}
+	/*
+	 * In case of IOC (say IOC+SLC case), pointers above could still be set
+	 * but end up not being relevant as the first function in chain is not
+	 * called at all for @dma_direct_ops
+	 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+	 */
 }
 
 void __ref arc_cache_init(void)
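Note: the closing comment names the chain arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*(). A standalone sketch of why the leftover pointer assignments are harmless under per-device coherency: a coherent device bails out before the chain is ever entered. Hypothetical stand-ins, not the kernel's dma-direct code:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long phys_addr_t;

struct device { bool dma_coherent; };

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	printf("invalidate %#lx (+%lu)\n", start, sz);
}

/* Set once at init, as arc_cache_init_master() does above. */
static void (*__dma_cache_inv)(phys_addr_t, unsigned long) = __dma_cache_inv_slc;

static void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}

static void sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, unsigned long sz)
{
	if (dev->dma_coherent)	/* IOC snoops for this device: chain never entered */
		return;
	dma_cache_inv(paddr, sz);
}

int main(void)
{
	struct device eth = { .dma_coherent = true };
	struct device sdio = { .dma_coherent = false };

	sync_dma_for_cpu(&eth, 0x80100000UL, 4096);	/* skipped */
	sync_dma_for_cpu(&sdio, 0x80200000UL, 4096);	/* invalidates */
	return 0;
}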