author     Chris Zankel <chris@zankel.net>   2007-08-22 13:14:51 -0400
committer  Chris Zankel <chris@zankel.net>   2007-08-27 16:54:16 -0400
commit     6656920b0b50beacb6cb64cf55273cbb686e436e (patch)
tree       dab9fdb81821b455a29779de6ca3306dbdf05dbd /arch/xtensa/mm
parent     ff6fd469885aafa5ec387babcb6537f3c00d6df0 (diff)
[XTENSA] Add support for cache-aliasing
Add support for processors that have cache-aliasing issues, such as the Stretch S5000 processor. Cache-aliasing means that the size of the cache (for one way) is larger than the page size, so a page can end up in several places in the cache depending on the virtual-to-physical translation. The method used here is to map a user page temporarily through auto-refill ways 0 and 1 of the DTLB. We probably will want to revisit this issue and use a better approach with kmap/kunmap.

Signed-off-by: Chris Zankel <chris@zankel.net>
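The standalone sketch below is not part of the patch; it only illustrates the aliasing condition described above. The name DCACHE_ALIAS_MASK mirrors a macro the patch relies on, while the 16 KiB way size and the helper function are assumptions made for illustration.

/*
 * Illustrative sketch only -- not from the patch.  Assumes 4 KiB pages and
 * a 16 KiB direct-mapped D-cache way, so two address bits pick the "color"
 * (the position of a page within one cache way).
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define DCACHE_WAY_SIZE   (16 * 1024UL)                 /* assumed example */
#define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE) /* the color bits  */

/* Two mappings of one physical page can sit in different cache lines
 * (and hold stale data) whenever their colors differ. */
static bool dcache_alias(unsigned long user_vaddr, unsigned long kernel_vaddr)
{
	return (user_vaddr & DCACHE_ALIAS_MASK) !=
	       (kernel_vaddr & DCACHE_ALIAS_MASK);
}

int main(void)
{
	/* colors 1 and 3 -> the two mappings alias */
	printf("alias: %d\n", dcache_alias(0x10001000UL, 0x20003000UL));
	return 0;
}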
Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--   arch/xtensa/mm/Makefile |   6
-rw-r--r--   arch/xtensa/mm/cache.c  | 256
-rw-r--r--   arch/xtensa/mm/fault.c  |   6
-rw-r--r--   arch/xtensa/mm/init.c   | 252
-rw-r--r--   arch/xtensa/mm/misc.S   | 306
5 files changed, 547 insertions(+), 279 deletions(-)
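As a rough orientation before the diff: when a page's kernel and user mappings have different colors, the flush helpers added by this patch flush the page through a temporary, color-matched virtual address. The sketch below only demonstrates how such an address is formed; TLBTEMP_BASE_1 and DCACHE_ALIAS_MASK are names taken from the patch, but the base value used here and the wrapper function are assumptions for illustration, not kernel code.

/*
 * Sketch only -- not from the patch.  In the kernel, TLBTEMP_BASE_1 is an
 * architecture-defined window backed by a scratch DTLB entry; 0xd0000000 is
 * just a placeholder so this compiles as a standalone program.
 */
#include <stdio.h>

#define PAGE_SIZE         4096UL
#define DCACHE_WAY_SIZE   (16 * 1024UL)                 /* assumed example */
#define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE)
#define TLBTEMP_BASE_1    0xd0000000UL                  /* placeholder     */

/* The temporary address keeps the user address's color, so flushing through
 * it hits the same cache lines the user mapping would have used. */
static unsigned long tlbtemp_addr(unsigned long user_vaddr)
{
	return TLBTEMP_BASE_1 + (user_vaddr & DCACHE_ALIAS_MASK);
}

int main(void)
{
	printf("flush via %#lx\n", tlbtemp_addr(0x20003000UL));
	return 0;
}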
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index a5aed5932d7b..10aec22a8f98 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,9 +5,5 @@
 # removes any old dependencies. DON'T put your own dependencies here
 # unless it's something special (ie not a .c file).
 #
-# Note 2! The CFLAGS definition is now in the main makefile...
 
-obj-y := init.o fault.o tlb.o misc.o
-obj-m :=
-obj-n :=
-obj- :=
+obj-y := init.o fault.o tlb.o misc.o cache.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 000000000000..9a1fa9478ae7
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,256 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel	<chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgtable.h>
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+//#define printd(x...) printk(x)
+#define printd(x...) do { } while(0)
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we have to always flush the cache when pages are
+ * unmapped (see tlb_start_vma(). So, we use this flag to indicate a dirty
+ * page.
+ *
+ *
+ *
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+/*
+ * Any time the kernel writes to a user page cache page, or it is about to
+ * read from a page cache page this routine is called.
+ *
+ */
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+
+	/*
+	 * If we have a mapping but the page is not mapped to user-space
+	 * yet, we simply mark this page dirty and defer flushing the
+	 * caches until update_mmu().
+	 */
+
+	if (mapping && !mapping_mapped(mapping)) {
+		if (!test_bit(PG_arch_1, &page->flags))
+			set_bit(PG_arch_1, &page->flags);
+		return;
+
+	} else {
+
+		unsigned long phys = page_to_phys(page);
+		unsigned long temp = page->index << PAGE_SHIFT;
+		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+		unsigned long virt;
+
+		/*
+		 * Flush the page in kernel space and user space.
+		 * Note that we can omit that step if aliasing is not
+		 * an issue, but we do have to synchronize I$ and D$
+		 * if we have a mapping.
+		 */
+
+		if (!alias && !mapping)
+			return;
+
+		__flush_invalidate_dcache_page((long)page_address(page));
+
+		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+		if (alias)
+			__flush_invalidate_dcache_page_alias(virt, phys);
+
+		if (mapping)
+			__invalidate_icache_page_alias(virt, phys);
+	}
+
+	/* There shouldn't be an entry in the cache for this page anymore. */
+}
+
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void flush_cache_range(struct vm_area_struct* vma,
+		       unsigned long start, unsigned long end)
+{
+	__flush_invalidate_dcache_all();
+	__invalidate_icache_all();
+}
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
+		      unsigned long pfn)
+{
+	/* Note that we have to use the 'alias' address to avoid multi-hit */
+
+	unsigned long phys = page_to_phys(pfn_to_page(pfn));
+	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+	__flush_invalidate_dcache_page_alias(virt, phys);
+	__invalidate_icache_page_alias(virt, phys);
+}
+
+#endif
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+
+	/* Invalidate old entry in TLBs */
+
+	invalidate_itlb_mapping(addr);
+	invalidate_dtlb_mapping(addr);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+
+		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+		unsigned long paddr = (unsigned long) page_address(page);
+		unsigned long phys = page_to_phys(page);
+
+		__flush_invalidate_dcache_page(paddr);
+
+		__flush_invalidate_dcache_page_alias(vaddr, phys);
+		__invalidate_icache_page_alias(vaddr, phys);
+
+		clear_bit(PG_arch_1, &page->flags);
+	}
+#else
+	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+	    && (vma->vm_flags & VM_EXEC) != 0) {
+		unsigned long vaddr = addr & PAGE_MASK;
+		__flush_dcache_page(vaddr);
+		__invalidate_icache_page(vaddr);
+		set_bit(PG_arch_1, &page->flags);
+	}
+#endif
+}
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long vaddr, void *dst, const void *src,
+		unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/* Flush and invalidate user page if aliased. */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	/* Copy data */
+
+	memcpy(dst, src, len);
+
+	/*
+	 * Flush and invalidate kernel page if aliased and synchronize
+	 * data and instruction caches for executable pages.
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+		__flush_invalidate_dcache_range((unsigned long) dst, len);
+		if ((vma->vm_flags & VM_EXEC) != 0) {
+			__invalidate_icache_page_alias(temp, phys);
+		}
+
+	} else if ((vma->vm_flags & VM_EXEC) != 0) {
+		__flush_dcache_range((unsigned long)dst,len);
+		__invalidate_icache_range((unsigned long) dst, len);
+	}
+}
+
+extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long vaddr, void *dst, const void *src,
+		unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/*
+	 * Flush user page if aliased.
+	 * (Note: a simply flush would be sufficient)
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	memcpy(dst, src, len);
+}
+
+#endif
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 16004067add3..45d28f217c03 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -24,6 +24,8 @@
 unsigned long asid_cache = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
 
+#undef DEBUG_PAGE_FAULT
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -64,7 +66,7 @@ void do_page_fault(struct pt_regs *regs)
 	      exccause == EXCCAUSE_ITLB_MISS ||
 	      exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
 
-#if 0
+#ifdef DEBUG_PAGE_FAULT
 	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
@@ -219,7 +221,7 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
 	/* Are we prepared to handle this kernel fault?  */
 	if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#if 1
+#ifdef DEBUG_PAGE_FAULT
 		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
 		       current->comm, regs->pc, entry->fixup);
 #endif
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 8415c76f11c2..b3086f34a8e7 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -15,40 +15,24 @@
  * Kevin Chea
  */
 
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
 #include <linux/bootmem.h>
 #include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/bootparam.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
-#include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-
 
-#define DEBUG 0
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-//static DEFINE_SPINLOCK(tlb_lock);
-
-/*
- * This flag is used to indicate that the page was mapped and modified in
- * kernel space, so the cache is probably dirty at that address.
- * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
- * synchronizes the caches if this bit is set.
- */
-
-#define PG_cache_clean PG_arch_1
 
 /* References to section boundaries */
 
@@ -323,228 +307,22 @@ void show_mem(void)
323 printk("%d free pages\n", free); 307 printk("%d free pages\n", free);
324} 308}
325 309
326/* ------------------------------------------------------------------------- */ 310struct kmem_cache *pgtable_cache __read_mostly;
327
328#if (DCACHE_WAY_SIZE > PAGE_SIZE)
329
330/*
331 * With cache aliasing, the page color of the page in kernel space and user
332 * space might mismatch. We temporarily map the page to a different virtual
333 * address with the same color and clear the page there.
334 */
335
336void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
337{
338
339 /* There shouldn't be any entries for this page. */
340
341 __flush_invalidate_dcache_page_phys(__pa(page_address(page)));
342
343 if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
344 unsigned long v, p;
345
346 /* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
347
348 spin_lock(&tlb_lock);
349
350 p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
351 kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
352 v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
353 __asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));
354
355 clear_page(kaddr);
356
357 spin_unlock(&tlb_lock);
358 } else {
359 clear_page(kaddr);
360 }
361
362 /* We need to make sure that i$ and d$ are coherent. */
363
364 clear_bit(PG_cache_clean, &page->flags);
365}
366
367/*
368 * With cache aliasing, we have to make sure that the page color of the page
369 * in kernel space matches that of the virtual user address before we read
370 * the page. If the page color differ, we create a temporary DTLB entry with
371 * the corrent page color and use this 'temporary' address as the source.
372 * We then use the same approach as in clear_user_page and copy the data
373 * to the kernel space and clear the PG_cache_clean bit to synchronize caches
374 * later.
375 *
376 * Note:
377 * Instead of using another 'way' for the temporary DTLB entry, we could
378 * probably use the same entry that points to the kernel address (after
379 * saving the original value and restoring it when we are done).
380 */
381 311
382void copy_user_page(void* to, void* from, unsigned long vaddr, 312static void pgd_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
383 struct page* to_page)
384{ 313{
385 /* There shouldn't be any entries for the new page. */ 314 pte_t* ptep = (pte_t*)addr;
386 315 int i;
387 __flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));
388
389 spin_lock(&tlb_lock);
390
391 if (!PAGE_COLOR_EQ(vaddr, from)) {
392 unsigned long v, p, t;
393
394 __asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
395 : "=a"(p), "=a"(t) : "a"(from));
396 from = (void*)PAGE_COLOR_MAP0(vaddr);
397 v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
398 __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
399 }
400
401 if (!PAGE_COLOR_EQ(vaddr, to)) {
402 unsigned long v, p;
403
404 p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
405 to = (void*)PAGE_COLOR_MAP1(vaddr);
406 v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
407 __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
408 }
409 copy_page(to, from);
410
411 spin_unlock(&tlb_lock);
412
413 /* We need to make sure that i$ and d$ are coherent. */
414
415 clear_bit(PG_cache_clean, &to_page->flags);
416}
417
418
419
420/*
421 * Any time the kernel writes to a user page cache page, or it is about to
422 * read from a page cache page this routine is called.
423 *
424 * Note:
425 * The kernel currently only provides one architecture bit in the page
426 * flags that we use for I$/D$ coherency. Maybe, in future, we can
427 * use a sepearte bit for deferred dcache aliasing:
428 * If the page is not mapped yet, we only need to set a flag,
429 * if mapped, we need to invalidate the page.
430 */
431// FIXME: we probably need this for WB caches not only for Page Coloring..
432
433void flush_dcache_page(struct page *page)
434{
435 unsigned long addr = __pa(page_address(page));
436 struct address_space *mapping = page_mapping(page);
437
438 __flush_invalidate_dcache_page_phys(addr);
439
440 if (!test_bit(PG_cache_clean, &page->flags))
441 return;
442
443 /* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
444#if 0
445 if (mapping && !mapping_mapped(mapping))
446 clear_bit(PG_cache_clean, &page->flags);
447 else
448#endif
449 __invalidate_icache_page_phys(addr);
450}
451
452void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
453 unsigned long e)
454{
455 __flush_invalidate_cache_all();
456}
457
458void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
459 unsigned long pfn)
460{
461 struct page *page = pfn_to_page(pfn);
462
463 /* Remove any entry for the old mapping. */
464
465 if (current->active_mm == vma->vm_mm) {
466 unsigned long addr = __pa(page_address(page));
467 __flush_invalidate_dcache_page_phys(addr);
468 if ((vma->vm_flags & VM_EXEC) != 0)
469 __invalidate_icache_page_phys(addr);
470 } else {
471 BUG();
472 }
473}
474
475#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
476
477
478pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
479{
480 pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
481 if (likely(pte)) {
482 pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
483 int i;
484 for (i = 0; i < 1024; i++, ptep++)
485 pte_clear(mm, addr, ptep);
486 }
487 return pte;
488}
489
490struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
491{
492 struct page *page;
493
494 page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
495
496 if (likely(page)) {
497 pte_t* ptep = kmap_atomic(page, KM_USER0);
498 int i;
499 316
500 for (i = 0; i < 1024; i++, ptep++) 317 for (i = 0; i < 1024; i++, ptep++)
501 pte_clear(mm, addr, ptep); 318 pte_clear(NULL, 0, ptep);
502 319
503 kunmap_atomic(ptep, KM_USER0);
504 }
505 return page;
506} 320}
507 321
508 322void __init pgtable_cache_init(void)
509/*
510 * Handle D$/I$ coherency.
511 *
512 * Note:
513 * We only have one architecture bit for the page flags, so we cannot handle
514 * cache aliasing, yet.
515 */
516
517void
518update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
519{ 323{
520 unsigned long pfn = pte_pfn(pte); 324 pgtable_cache = kmem_cache_create("pgd",
521 struct page *page; 325 PAGE_SIZE, PAGE_SIZE,
522 unsigned long vaddr = addr & PAGE_MASK; 326 SLAB_HWCACHE_ALIGN,
523 327 pgd_ctor);
524 if (!pfn_valid(pfn))
525 return;
526
527 page = pfn_to_page(pfn);
528
529 invalidate_itlb_mapping(addr);
530 invalidate_dtlb_mapping(addr);
531
532 /* We have a new mapping. Use it. */
533
534 write_dtlb_entry(pte, dtlb_probe(addr));
535
536 /* If the processor can execute from this page, synchronize D$/I$. */
537
538 if ((vma->vm_flags & VM_EXEC) != 0) {
539
540 write_itlb_entry(pte, itlb_probe(addr));
541
542 /* Synchronize caches, if not clean. */
543
544 if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
545 __flush_dcache_page(vaddr);
546 __invalidate_icache_page(vaddr);
547 }
548 }
549} 328}
550
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index ae085332c607..e1f880368e32 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -7,29 +7,33 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  */
 
-/* Note: we might want to implement some of the loops as zero-overhead-loops,
- * where applicable and if supported by the processor.
- */
 
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
+
 
-/* clear_page (page) */
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ *                    a2
+ */
 
 ENTRY(clear_page)
 	entry	a1, 16
-	addi	a4, a2, PAGE_SIZE
-	movi	a3, 0
 
-1:	s32i	a3, a2, 0
+	movi	a3, 0
+	__loopi	a2, a7, PAGE_SIZE, 32
+	s32i	a3, a2, 0
 	s32i	a3, a2, 4
 	s32i	a3, a2, 8
 	s32i	a3, a2, 12
@@ -37,42 +41,277 @@ ENTRY(clear_page)
 	s32i	a3, a2, 20
 	s32i	a3, a2, 24
 	s32i	a3, a2, 28
-	addi	a2, a2, 32
-	blt	a2, a4, 1b
+	__endla	a2, a7, 32
 
 	retw
 
 /*
+ * copy_page and copy_user_page are the same for non-cache-aliased configs.
+ *
  * copy_page (void *to, void *from)
  *               a2          a3
  */
 
 ENTRY(copy_page)
 	entry	a1, 16
-	addi	a4, a2, PAGE_SIZE
-
-1:	l32i	a5, a3, 0
-	l32i	a6, a3, 4
-	l32i	a7, a3, 8
-	s32i	a5, a2, 0
-	s32i	a6, a2, 4
-	s32i	a7, a2, 8
-	l32i	a5, a3, 12
-	l32i	a6, a3, 16
-	l32i	a7, a3, 20
-	s32i	a5, a2, 12
-	s32i	a6, a2, 16
-	s32i	a7, a2, 20
-	l32i	a5, a3, 24
-	l32i	a6, a3, 28
-	s32i	a5, a2, 24
-	s32i	a6, a2, 28
-	addi	a2, a2, 32
-	addi	a3, a3, 32
-	blt	a2, a4, 1b
 
+	__loopi	a2, a4, PAGE_SIZE, 32
+
+	l32i	a8, a3, 0
+	l32i	a9, a3, 4
+	s32i	a8, a2, 0
+	s32i	a9, a2, 4
+
+	l32i	a8, a3, 8
+	l32i	a9, a3, 12
+	s32i	a8, a2, 8
+	s32i	a9, a2, 12
+
+	l32i	a8, a3, 16
+	l32i	a9, a3, 20
+	s32i	a8, a2, 16
+	s32i	a9, a2, 20
+
+	l32i	a8, a3, 24
+	l32i	a9, a3, 28
+	s32i	a8, a2, 24
+	s32i	a9, a2, 28
+
+	addi	a2, a2, 32
+	addi	a3, a3, 32
+
+	__endl	a2, a4
+
+	retw
+
+/*
+ * If we have to deal with cache aliasing, we use temporary memory mappings
+ * to ensure that the source and destination pages have the same color as
+ * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
+ *
+ * The temporary DTLB entries shouldn't be flushed by interrupts, but are
+ * flushed by preemptive task switches. Special code in the
+ * fast_second_level_miss handler re-established the temporary mapping.
+ * It requires that the PPNs for the destination and source addresses are
+ * in a6, and a7, respectively.
+ */
+
+/* TLB miss exceptions are treated special in the following region */
+
+ENTRY(__tlbtemp_mapping_start)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
+ *                     a2              a3                 a4
+ */
+
+ENTRY(clear_user_page)
+	entry	a1, 32
+
+	/* Mark page dirty and determine alias. */
+
+	movi	a7, (1 << PG_ARCH_1)
+	l32i	a5, a4, PAGE_FLAGS
+	xor	a6, a2, a3
+	extui	a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	or	a5, a5, a7
+	slli	a3, a3, PAGE_SHIFT
+	s32i	a5, a4, PAGE_FLAGS
+
+	/* Skip setting up a temporary DTLB if not aliased. */
+
+	beqz	a6, 1f
+
+	/* Invalidate kernel page. */
+
+	mov	a10, a2
+	call8	__invalidate_dcache_page
+
+	/* Setup a temporary DTLB with the color of the VPN */
+
+	movi	a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+	movi	a5, TLBTEMP_BASE_1			# virt
+	add	a6, a2, a4				# ppn
+	add	a2, a5, a3				# add 'color'
+
+	wdtlb	a6, a2
+	dsync
+
+1:	movi	a3, 0
+	__loopi	a2, a7, PAGE_SIZE, 32
+	s32i	a3, a2, 0
+	s32i	a3, a2, 4
+	s32i	a3, a2, 8
+	s32i	a3, a2, 12
+	s32i	a3, a2, 16
+	s32i	a3, a2, 20
+	s32i	a3, a2, 24
+	s32i	a3, a2, 28
+	__endla	a2, a7, 32
+
+	bnez	a6, 1f
+	retw
+
+	/* We need to invalidate the temporary idtlb entry, if any. */
+
+1:	addi	a2, a2, -PAGE_SIZE
+	idtlb	a2
+	dsync
+
+	retw
+
+/*
+ * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
+ *                    a2          a3            a4                  a5
+ */
+
+ENTRY(copy_user_page)
+
+	entry	a1, 32
+
+	/* Mark page dirty and determine alias for destination. */
+
+	movi	a8, (1 << PG_ARCH_1)
+	l32i	a9, a5, PAGE_FLAGS
+	xor	a6, a2, a4
+	xor	a7, a3, a4
+	extui	a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	extui	a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+	or	a9, a9, a8
+	slli	a4, a4, PAGE_SHIFT
+	s32i	a9, a5, PAGE_FLAGS
+	movi	a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+
+	beqz	a6, 1f
+
+	/* Invalidate dcache */
+
+	mov	a10, a2
+	call8	__invalidate_dcache_page
+
+	/* Setup a temporary DTLB with a matching color. */
+
+	movi	a8, TLBTEMP_BASE_1			# base
+	add	a6, a2, a5				# ppn
+	add	a2, a8, a4				# add 'color'
+
+	wdtlb	a6, a2
+	dsync
+
+	/* Skip setting up a temporary DTLB for destination if not aliased. */
+
+1:	beqz	a7, 1f
+
+	/* Setup a temporary DTLB with a matching color. */
+
+	movi	a8, TLBTEMP_BASE_2			# base
+	add	a7, a3, a5				# ppn
+	add	a3, a8, a4
+	addi	a8, a3, 1				# way1
+
+	wdtlb	a7, a8
+	dsync
+
+1:	__loopi	a2, a4, PAGE_SIZE, 32
+
+	l32i	a8, a3, 0
+	l32i	a9, a3, 4
+	s32i	a8, a2, 0
+	s32i	a9, a2, 4
+
+	l32i	a8, a3, 8
+	l32i	a9, a3, 12
+	s32i	a8, a2, 8
+	s32i	a9, a2, 12
+
+	l32i	a8, a3, 16
+	l32i	a9, a3, 20
+	s32i	a8, a2, 16
+	s32i	a9, a2, 20
+
+	l32i	a8, a3, 24
+	l32i	a9, a3, 28
+	s32i	a8, a2, 24
+	s32i	a9, a2, 28
+
+	addi	a2, a2, 32
+	addi	a3, a3, 32
+
+	__endl	a2, a4
+
+	/* We need to invalidate any temporary mapping! */
+
+	bnez	a6, 1f
+	bnez	a7, 2f
+	retw
+
+1:	addi	a2, a2, -PAGE_SIZE
+	idtlb	a2
+	dsync
+	bnez	a7, 2f
+	retw
+
+2:	addi	a3, a3, -PAGE_SIZE+1
+	idtlb	a3
+	dsync
+
+	retw
+
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+/*
+ * void __flush_invalidate_dcache_page_alias (addr, phys)
+ *                                             a2    a3
+ */
+
+ENTRY(__flush_invalidate_dcache_page_alias)
+	entry	sp, 16
+
+	movi	a7, 0			# required for exception handler
+	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+	mov	a4, a2
+	wdtlb	a6, a2
+	dsync
+
+	___flush_invalidate_dcache_page a2 a3
+
+	idtlb	a4
+	dsync
+
+	retw
+
+#endif
+
+ENTRY(__tlbtemp_mapping_itlb)
+
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+
+ENTRY(__invalidate_icache_page_alias)
+	entry	sp, 16
+
+	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+	mov	a4, a2
+	witlb	a6, a2
+	isync
+
+	___invalidate_icache_page a2 a3
+
+	iitlb	a4
+	isync
 	retw
 
+#endif
+
+/* End of special treatment in tlb miss exception */
+
+ENTRY(__tlbtemp_mapping_end)
+
 /*
  * void __invalidate_icache_page(ulong start)
  */
@@ -121,8 +360,6 @@ ENTRY(__flush_dcache_page)
 	dsync
 	retw
 
-
-
 /*
  * void __invalidate_icache_range(ulong start, ulong size)
  */
@@ -168,7 +405,6 @@ ENTRY(__invalidate_dcache_range)
 
 	___invalidate_dcache_range a2 a3 a4
 
-
 	retw
 
 /*