diff options
author | Ley Foon Tan <lftan@altera.com> | 2015-04-24 02:18:55 -0400 |
---|---|---|
committer | Ley Foon Tan <lftan@altera.com> | 2015-04-24 02:49:53 -0400 |
commit | 1a70db49a735350f221959bf26e9a12f63a743bc (patch) | |
tree | 560890c58f12f77d2e1d70482cefc6b0c4111d6e | |
parent | 2009337e30cdae64405ea7b8fa1578b921508871 (diff) |
nios2: rework cache
- flush dcache before flush instruction cache
- rework update_mmu_cache and flush_dcache_page
- add shmparam.h
Signed-off-by: Ley Foon Tan <lftan@altera.com>
-rw-r--r-- | arch/nios2/include/asm/Kbuild | 1 | ||||
-rw-r--r-- | arch/nios2/include/asm/shmparam.h | 21 | ||||
-rw-r--r-- | arch/nios2/mm/cacheflush.c | 52 |
3 files changed, 57 insertions, 17 deletions
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 01c75f36e8b3..24b3d8999ac7 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild | |||
@@ -46,7 +46,6 @@ generic-y += segment.h | |||
46 | generic-y += sembuf.h | 46 | generic-y += sembuf.h |
47 | generic-y += serial.h | 47 | generic-y += serial.h |
48 | generic-y += shmbuf.h | 48 | generic-y += shmbuf.h |
49 | generic-y += shmparam.h | ||
50 | generic-y += siginfo.h | 49 | generic-y += siginfo.h |
51 | generic-y += signal.h | 50 | generic-y += signal.h |
52 | generic-y += socket.h | 51 | generic-y += socket.h |
diff --git a/arch/nios2/include/asm/shmparam.h b/arch/nios2/include/asm/shmparam.h new file mode 100644 index 000000000000..60784294e407 --- /dev/null +++ b/arch/nios2/include/asm/shmparam.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright Altera Corporation (C) <2015>. All rights reserved | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef _ASM_NIOS2_SHMPARAM_H | ||
17 | #define _ASM_NIOS2_SHMPARAM_H | ||
18 | |||
19 | #define SHMLBA CONFIG_NIOS2_DCACHE_SIZE | ||
20 | |||
21 | #endif /* _ASM_NIOS2_SHMPARAM_H */ | ||
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index a09b2b735803..223cdcc8203f 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c | |||
@@ -128,12 +128,14 @@ void flush_cache_dup_mm(struct mm_struct *mm) | |||
128 | 128 | ||
129 | void flush_icache_range(unsigned long start, unsigned long end) | 129 | void flush_icache_range(unsigned long start, unsigned long end) |
130 | { | 130 | { |
131 | __flush_dcache(start, end); | ||
131 | __flush_icache(start, end); | 132 | __flush_icache(start, end); |
132 | } | 133 | } |
133 | 134 | ||
134 | void flush_dcache_range(unsigned long start, unsigned long end) | 135 | void flush_dcache_range(unsigned long start, unsigned long end) |
135 | { | 136 | { |
136 | __flush_dcache(start, end); | 137 | __flush_dcache(start, end); |
138 | __flush_icache(start, end); | ||
137 | } | 139 | } |
138 | EXPORT_SYMBOL(flush_dcache_range); | 140 | EXPORT_SYMBOL(flush_dcache_range); |
139 | 141 | ||
@@ -156,6 +158,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page) | |||
156 | unsigned long start = (unsigned long) page_address(page); | 158 | unsigned long start = (unsigned long) page_address(page); |
157 | unsigned long end = start + PAGE_SIZE; | 159 | unsigned long end = start + PAGE_SIZE; |
158 | 160 | ||
161 | __flush_dcache(start, end); | ||
159 | __flush_icache(start, end); | 162 | __flush_icache(start, end); |
160 | } | 163 | } |
161 | 164 | ||
@@ -170,6 +173,18 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, | |||
170 | __flush_icache(start, end); | 173 | __flush_icache(start, end); |
171 | } | 174 | } |
172 | 175 | ||
176 | void __flush_dcache_page(struct address_space *mapping, struct page *page) | ||
177 | { | ||
178 | /* | ||
179 | * Writeback any data associated with the kernel mapping of this | ||
180 | * page. This ensures that data in the physical page is mutually | ||
181 | * coherent with the kernels mapping. | ||
182 | */ | ||
183 | unsigned long start = (unsigned long)page_address(page); | ||
184 | |||
185 | __flush_dcache_all(start, start + PAGE_SIZE); | ||
186 | } | ||
187 | |||
173 | void flush_dcache_page(struct page *page) | 188 | void flush_dcache_page(struct page *page) |
174 | { | 189 | { |
175 | struct address_space *mapping; | 190 | struct address_space *mapping; |
@@ -187,11 +202,12 @@ void flush_dcache_page(struct page *page) | |||
187 | if (mapping && !mapping_mapped(mapping)) { | 202 | if (mapping && !mapping_mapped(mapping)) { |
188 | clear_bit(PG_dcache_clean, &page->flags); | 203 | clear_bit(PG_dcache_clean, &page->flags); |
189 | } else { | 204 | } else { |
190 | unsigned long start = (unsigned long)page_address(page); | 205 | __flush_dcache_page(mapping, page); |
191 | 206 | if (mapping) { | |
192 | __flush_dcache_all(start, start + PAGE_SIZE); | 207 | unsigned long start = (unsigned long)page_address(page); |
193 | if (mapping) | ||
194 | flush_aliases(mapping, page); | 208 | flush_aliases(mapping, page); |
209 | flush_icache_range(start, start + PAGE_SIZE); | ||
210 | } | ||
195 | set_bit(PG_dcache_clean, &page->flags); | 211 | set_bit(PG_dcache_clean, &page->flags); |
196 | } | 212 | } |
197 | } | 213 | } |
@@ -202,6 +218,7 @@ void update_mmu_cache(struct vm_area_struct *vma, | |||
202 | { | 218 | { |
203 | unsigned long pfn = pte_pfn(*pte); | 219 | unsigned long pfn = pte_pfn(*pte); |
204 | struct page *page; | 220 | struct page *page; |
221 | struct address_space *mapping; | ||
205 | 222 | ||
206 | if (!pfn_valid(pfn)) | 223 | if (!pfn_valid(pfn)) |
207 | return; | 224 | return; |
@@ -214,16 +231,15 @@ void update_mmu_cache(struct vm_area_struct *vma, | |||
214 | if (page == ZERO_PAGE(0)) | 231 | if (page == ZERO_PAGE(0)) |
215 | return; | 232 | return; |
216 | 233 | ||
217 | if (!PageReserved(page) && | 234 | mapping = page_mapping(page); |
218 | !test_and_set_bit(PG_dcache_clean, &page->flags)) { | 235 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) |
219 | unsigned long start = page_to_virt(page); | 236 | __flush_dcache_page(mapping, page); |
220 | struct address_space *mapping; | 237 | |
221 | 238 | if(mapping) | |
222 | __flush_dcache(start, start + PAGE_SIZE); | 239 | { |
223 | 240 | flush_aliases(mapping, page); | |
224 | mapping = page_mapping(page); | 241 | if (vma->vm_flags & VM_EXEC) |
225 | if (mapping) | 242 | flush_icache_page(vma, page); |
226 | flush_aliases(mapping, page); | ||
227 | } | 243 | } |
228 | } | 244 | } |
229 | 245 | ||
@@ -231,15 +247,19 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | |||
231 | struct page *to) | 247 | struct page *to) |
232 | { | 248 | { |
233 | __flush_dcache(vaddr, vaddr + PAGE_SIZE); | 249 | __flush_dcache(vaddr, vaddr + PAGE_SIZE); |
250 | __flush_icache(vaddr, vaddr + PAGE_SIZE); | ||
234 | copy_page(vto, vfrom); | 251 | copy_page(vto, vfrom); |
235 | __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); | 252 | __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); |
253 | __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); | ||
236 | } | 254 | } |
237 | 255 | ||
238 | void clear_user_page(void *addr, unsigned long vaddr, struct page *page) | 256 | void clear_user_page(void *addr, unsigned long vaddr, struct page *page) |
239 | { | 257 | { |
240 | __flush_dcache(vaddr, vaddr + PAGE_SIZE); | 258 | __flush_dcache(vaddr, vaddr + PAGE_SIZE); |
259 | __flush_icache(vaddr, vaddr + PAGE_SIZE); | ||
241 | clear_page(addr); | 260 | clear_page(addr); |
242 | __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); | 261 | __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); |
262 | __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); | ||
243 | } | 263 | } |
244 | 264 | ||
245 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | 265 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, |
@@ -248,7 +268,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | |||
248 | { | 268 | { |
249 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); | 269 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); |
250 | memcpy(dst, src, len); | 270 | memcpy(dst, src, len); |
251 | __flush_dcache((unsigned long)src, (unsigned long)src + len); | 271 | __flush_dcache_all((unsigned long)src, (unsigned long)src + len); |
252 | if (vma->vm_flags & VM_EXEC) | 272 | if (vma->vm_flags & VM_EXEC) |
253 | __flush_icache((unsigned long)src, (unsigned long)src + len); | 273 | __flush_icache((unsigned long)src, (unsigned long)src + len); |
254 | } | 274 | } |
@@ -259,7 +279,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | |||
259 | { | 279 | { |
260 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); | 280 | flush_cache_page(vma, user_vaddr, page_to_pfn(page)); |
261 | memcpy(dst, src, len); | 281 | memcpy(dst, src, len); |
262 | __flush_dcache((unsigned long)dst, (unsigned long)dst + len); | 282 | __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); |
263 | if (vma->vm_flags & VM_EXEC) | 283 | if (vma->vm_flags & VM_EXEC) |
264 | __flush_icache((unsigned long)dst, (unsigned long)dst + len); | 284 | __flush_icache((unsigned long)dst, (unsigned long)dst + len); |
265 | } | 285 | } |