diff options
author | Max Filippov <jcmvbkbc@gmail.com> | 2018-07-11 17:33:41 -0400 |
---|---|---|
committer | Max Filippov <jcmvbkbc@gmail.com> | 2018-07-11 18:59:41 -0400 |
commit | 2cc15e802b250a11ece57ea54f82993cf3430867 (patch) | |
tree | d67e3892c92e077bebeac05db22765376e8de5c1 | |
parent | adbfa4e6f99689511f1079452508d9d22417544c (diff) |
xtensa: platform-specific handling of coherent memory
Memory layout is not fixed for noMMU xtensa configurations. Platforms
that need to use coherent DMA should implement platform_vaddr_* helpers
that check address type (cached/uncached) and convert addresses between
these types.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
-rw-r--r-- | arch/xtensa/include/asm/kmem_layout.h | 6 | ||||
-rw-r--r-- | arch/xtensa/include/asm/pgtable.h | 8 | ||||
-rw-r--r-- | arch/xtensa/include/asm/platform.h | 27 | ||||
-rw-r--r-- | arch/xtensa/kernel/pci-dma.c | 73 |
4 files changed, 93 insertions, 21 deletions
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 2317c835a4db..9c12babc016c 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h | |||
@@ -63,12 +63,6 @@ | |||
63 | #error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT | 63 | #error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #else | ||
67 | |||
68 | #define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000) | ||
69 | #define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000) | ||
70 | #define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000) | ||
71 | |||
72 | #endif | 66 | #endif |
73 | 67 | ||
74 | #ifndef CONFIG_KASAN | 68 | #ifndef CONFIG_KASAN |
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 38802259978f..29cfe421cf41 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h | |||
@@ -66,6 +66,7 @@ | |||
66 | #define FIRST_USER_ADDRESS 0UL | 66 | #define FIRST_USER_ADDRESS 0UL |
67 | #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) | 67 | #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) |
68 | 68 | ||
69 | #ifdef CONFIG_MMU | ||
69 | /* | 70 | /* |
70 | * Virtual memory area. We keep a distance to other memory regions to be | 71 | * Virtual memory area. We keep a distance to other memory regions to be |
71 | * on the safe side. We also use this area for cache aliasing. | 72 | * on the safe side. We also use this area for cache aliasing. |
@@ -80,6 +81,13 @@ | |||
80 | #define TLBTEMP_SIZE ICACHE_WAY_SIZE | 81 | #define TLBTEMP_SIZE ICACHE_WAY_SIZE |
81 | #endif | 82 | #endif |
82 | 83 | ||
84 | #else | ||
85 | |||
86 | #define VMALLOC_START __XTENSA_UL_CONST(0) | ||
87 | #define VMALLOC_END __XTENSA_UL_CONST(0xffffffff) | ||
88 | |||
89 | #endif | ||
90 | |||
83 | /* | 91 | /* |
84 | * For the Xtensa architecture, the PTE layout is as follows: | 92 | * For the Xtensa architecture, the PTE layout is as follows: |
85 | * | 93 | * |
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h index f8fbef67bc5f..560483356a06 100644 --- a/arch/xtensa/include/asm/platform.h +++ b/arch/xtensa/include/asm/platform.h | |||
@@ -75,4 +75,31 @@ extern void platform_calibrate_ccount (void); | |||
75 | */ | 75 | */ |
76 | void cpu_reset(void) __attribute__((noreturn)); | 76 | void cpu_reset(void) __attribute__((noreturn)); |
77 | 77 | ||
78 | /* | ||
79 | * Memory caching is platform-dependent in noMMU xtensa configurations. | ||
80 | * The following set of functions should be implemented in platform code | ||
81 | * in order to enable coherent DMA memory operations when CONFIG_MMU is not | ||
82 | * enabled. Default implementations do nothing and issue a warning. | ||
83 | */ | ||
84 | |||
85 | /* | ||
86 | * Check whether p points to cached memory. | ||
87 | */ | ||
88 | bool platform_vaddr_cached(const void *p); | ||
89 | |||
90 | /* | ||
91 | * Check whether p points to an uncached memory. | ||
92 | */ | ||
93 | bool platform_vaddr_uncached(const void *p); | ||
94 | |||
95 | /* | ||
96 | * Return pointer to an uncached view of the cached address p. | ||
97 | */ | ||
98 | void *platform_vaddr_to_uncached(void *p); | ||
99 | |||
100 | /* | ||
101 | * Return pointer to a cached view of the uncached address p. | ||
102 | */ | ||
103 | void *platform_vaddr_to_cached(void *p); | ||
104 | |||
78 | #endif /* _XTENSA_PLATFORM_H */ | 105 | #endif /* _XTENSA_PLATFORM_H */ |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 5c8a67fc3aa1..fe3343ddccaf 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/platform.h> | ||
27 | 28 | ||
28 | static void do_cache_op(phys_addr_t paddr, size_t size, | 29 | static void do_cache_op(phys_addr_t paddr, size_t size, |
29 | void (*fn)(unsigned long, unsigned long)) | 30 | void (*fn)(unsigned long, unsigned long)) |
@@ -84,6 +85,58 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, | |||
84 | } | 85 | } |
85 | } | 86 | } |
86 | 87 | ||
88 | #ifdef CONFIG_MMU | ||
89 | bool platform_vaddr_cached(const void *p) | ||
90 | { | ||
91 | unsigned long addr = (unsigned long)p; | ||
92 | |||
93 | return addr >= XCHAL_KSEG_CACHED_VADDR && | ||
94 | addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE; | ||
95 | } | ||
96 | |||
97 | bool platform_vaddr_uncached(const void *p) | ||
98 | { | ||
99 | unsigned long addr = (unsigned long)p; | ||
100 | |||
101 | return addr >= XCHAL_KSEG_BYPASS_VADDR && | ||
102 | addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE; | ||
103 | } | ||
104 | |||
105 | void *platform_vaddr_to_uncached(void *p) | ||
106 | { | ||
107 | return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; | ||
108 | } | ||
109 | |||
110 | void *platform_vaddr_to_cached(void *p) | ||
111 | { | ||
112 | return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | ||
113 | } | ||
114 | #else | ||
115 | bool __attribute__((weak)) platform_vaddr_cached(const void *p) | ||
116 | { | ||
117 | WARN_ONCE(1, "Default %s implementation is used\n", __func__); | ||
118 | return true; | ||
119 | } | ||
120 | |||
121 | bool __attribute__((weak)) platform_vaddr_uncached(const void *p) | ||
122 | { | ||
123 | WARN_ONCE(1, "Default %s implementation is used\n", __func__); | ||
124 | return false; | ||
125 | } | ||
126 | |||
127 | void __attribute__((weak)) *platform_vaddr_to_uncached(void *p) | ||
128 | { | ||
129 | WARN_ONCE(1, "Default %s implementation is used\n", __func__); | ||
130 | return p; | ||
131 | } | ||
132 | |||
133 | void __attribute__((weak)) *platform_vaddr_to_cached(void *p) | ||
134 | { | ||
135 | WARN_ONCE(1, "Default %s implementation is used\n", __func__); | ||
136 | return p; | ||
137 | } | ||
138 | #endif | ||
139 | |||
87 | /* | 140 | /* |
88 | * Note: We assume that the full memory space is always mapped to 'kseg' | 141 | * Note: We assume that the full memory space is always mapped to 'kseg' |
89 | * Otherwise we have to use page attributes (not implemented). | 142 | * Otherwise we have to use page attributes (not implemented). |
@@ -92,8 +145,6 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, | |||
92 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 145 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
93 | gfp_t flag, unsigned long attrs) | 146 | gfp_t flag, unsigned long attrs) |
94 | { | 147 | { |
95 | unsigned long ret; | ||
96 | unsigned long uncached; | ||
97 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 148 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
98 | struct page *page = NULL; | 149 | struct page *page = NULL; |
99 | 150 | ||
@@ -134,29 +185,21 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
134 | return p; | 185 | return p; |
135 | } | 186 | } |
136 | #endif | 187 | #endif |
137 | ret = (unsigned long)page_address(page); | 188 | BUG_ON(!platform_vaddr_cached(page_address(page))); |
138 | BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || | 189 | __invalidate_dcache_range((unsigned long)page_address(page), size); |
139 | ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); | 190 | return platform_vaddr_to_uncached(page_address(page)); |
140 | |||
141 | uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; | ||
142 | __invalidate_dcache_range(ret, size); | ||
143 | |||
144 | return (void *)uncached; | ||
145 | } | 191 | } |
146 | 192 | ||
147 | void arch_dma_free(struct device *dev, size_t size, void *vaddr, | 193 | void arch_dma_free(struct device *dev, size_t size, void *vaddr, |
148 | dma_addr_t dma_handle, unsigned long attrs) | 194 | dma_addr_t dma_handle, unsigned long attrs) |
149 | { | 195 | { |
150 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 196 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
151 | unsigned long addr = (unsigned long)vaddr; | ||
152 | struct page *page; | 197 | struct page *page; |
153 | 198 | ||
154 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { | 199 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { |
155 | page = vaddr; | 200 | page = vaddr; |
156 | } else if (addr >= XCHAL_KSEG_BYPASS_VADDR && | 201 | } else if (platform_vaddr_uncached(vaddr)) { |
157 | addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) { | 202 | page = virt_to_page(platform_vaddr_to_cached(vaddr)); |
158 | addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | ||
159 | page = virt_to_page(addr); | ||
160 | } else { | 203 | } else { |
161 | #ifdef CONFIG_MMU | 204 | #ifdef CONFIG_MMU |
162 | dma_common_free_remap(vaddr, size, VM_MAP); | 205 | dma_common_free_remap(vaddr, size, VM_MAP); |