author     Chris Zankel <czankel@tensilica.com>   2005-06-24 01:01:24 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-24 03:05:22 -0400
commit     3f65ce4d141e435e54c20ed2379d983d362a2cb5 (patch)
tree       1e86807b3f215d90d9cf57aa609f73f856515b30
parent     249ac17e96811acc3c6402317dd5d5c89d2cbf68 (diff)
[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 5
The attached patch provides part 5 of the architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/xtensa/mm/Makefile  |  13
-rw-r--r--  arch/xtensa/mm/fault.c   | 241
-rw-r--r--  arch/xtensa/mm/init.c    | 551
-rw-r--r--  arch/xtensa/mm/misc.S    | 374
-rw-r--r--  arch/xtensa/mm/pgtable.c |  76
-rw-r--r--  arch/xtensa/mm/tlb.c     | 545
6 files changed, 1800 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 000000000000..a5aed5932d7b
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,13 @@
#
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...

obj-y	 := init.o fault.o tlb.o misc.o
obj-m	 :=
obj-n	 :=
obj-	 :=
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 000000000000..a945a33e85a1
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,241 @@
// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgalloc.h>

unsigned long asid_cache = ASID_FIRST_VERSION;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

	is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec  = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
		    exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#if 0
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write ? "w" : "", is_exec ? "x" : "");
#endif

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	bad_page_fault(regs, address, SIGKILL);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#if 1
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
		       current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}

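For orientation: the si_code values chosen in do_page_fault() above are visible to
user space. A small stand-alone C program (an illustration, not part of the patch)
provokes the good_area path with is_write set and VM_WRITE clear, so the handler
observes SEGV_ACCERR rather than SEGV_MAPERR:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	/* SEGV_MAPERR: no mapping at all; SEGV_ACCERR: a mapping exists,
	 * but the access type (write/exec) is not permitted. */
	printf("SIGSEGV at %p, si_code=%s\n", info->si_addr,
	       info->si_code == SEGV_MAPERR ? "SEGV_MAPERR" : "SEGV_ACCERR");
	exit(0);
}

int main(void)
{
	struct sigaction sa;
	char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* Write to a read-only anonymous mapping -> SEGV_ACCERR. */
	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	p[0] = 1;
	return 1;
}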
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000000..56aace84aaeb
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,551 @@
/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>


#define DEBUG 0

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_SPINLOCK(tlb_lock);	/* used by the cache-aliasing helpers below */

/*
 * This flag is used to indicate that the page was mapped and modified in
 * kernel space, so the cache is probably dirty at that address.
 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
 * synchronizes the caches if this bit is set.
 */

#define PG_cache_clean PG_arch_1

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve some memory from the memory pool.
 *
 * Parameters:
 *  start	Start of region,
 *  end		End of region,
 *  must_exist	Must exist in memory pool.
 *
 * Returns:
 *  0 (region does not overlap any memory bank)
 * -1 (success)
 */

int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
	int i;

	if (start == end)
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (i = 0; i < sysmem.nr_banks; i++)
		if (start < sysmem.bank[i].end
		    && end >= sysmem.bank[i].start)
			break;

	if (i == sysmem.nr_banks) {
		if (must_exist)
			printk(KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
			       "not in any region!\n", start, end);
		return 0;
	}

	if (start > sysmem.bank[i].start) {
		if (end < sysmem.bank[i].end) {
			/* split entry */
			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
				panic("meminfo overflow\n");
			sysmem.bank[sysmem.nr_banks].start = end;
			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
			sysmem.nr_banks++;
		}
		sysmem.bank[i].end = start;
	} else {
		if (end < sysmem.bank[i].end)
			sysmem.bank[i].start = end;
		else {
			/* remove entry */
			sysmem.nr_banks--;
			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
			sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
		}
	}
	return -1;
}
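To make the trim/split/remove cases concrete, a hedged walk-through with invented
bank values:

/* Illustration only: one bank [0x00000000, 0x08000000).  Reserving the
 * middle region [0x00100000, 0x00200000) takes the "split entry" branch: */
mem_reserve(0x00100000, 0x00200000, 1);
/* afterwards:
 *   bank[0] = [0x00000000, 0x00100000)  -- end trimmed down to 'start'
 *   bank[1] = [0x00200000, 0x08000000)  -- new bank appended for the tail
 */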


/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

	for (i = 0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

	max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
		max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	for (i = 0; i < sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
					 bootmap_start >> PAGE_SHIFT,
					 max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map */

	for (i = 0; i < sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);
}
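A worked instance of the pfn arithmetic above (bank bounds invented for
illustration):

/* Bank [0x00001800, 0x07fff800) with PAGE_SHIFT == 12:
 *   first whole pfn: PAGE_ALIGN(0x00001800) >> 12 == 0x2
 *   last pfn bound:  PAGE_ALIGN(0x07fff7ff) >> 12 == 0x8000
 * so this bank contributes min_low_pfn = 2 and max_pfn = 0x8000.
 */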


void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int i;

	/* All pages are DMA-able, so we put them all in the DMA zone. */

	zones_size[ZONE_DMA] = max_low_pfn;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	/* Initialize the kernel's page tables. */

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	free_area_init(zones_size);
}

/*
 * Flush the mmu and reset associated register to default values.
 */

void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensures
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_ALL_RESERVED);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long highmemsize, tmp, ram;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
	highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHMEM not implemented in init.c
#endif

	totalram_pages += free_all_bootmem();

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++) {
		ram++;
		if (PageReserved(mem_map+tmp))
			reservedpages++;
	}

	codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
	datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
	       "%ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       highmemsize >> 10);
}

void
free_reserved_mem(void *start, void *end)
{
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page((unsigned long)start);
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (initrd_is_mapped) {
		free_reserved_mem((void*)start, (void*)end);
		printk("Freeing initrd memory: %ldk freed\n", (end-start) >> 10);
	}
}
#endif

void free_initmem(void)
{
	free_reserved_mem(&__init_begin, &__init_end);
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:  %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d free pages\n", free);
}

/* ------------------------------------------------------------------------- */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * With cache aliasing, the page color of the page in kernel space and user
 * space might mismatch. We temporarily map the page to a different virtual
 * address with the same color and clear the page there.
 */

void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{

	/* There shouldn't be any entries for this page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));

	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
		unsigned long v, p;

		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */

		spin_lock(&tlb_lock);

		p = (unsigned long)pte_val((mk_pte(page, PAGE_KERNEL)));
		kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));

		clear_page(kaddr);

		spin_unlock(&tlb_lock);
	} else {
		clear_page(kaddr);
	}

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &page->flags);
}

/*
 * With cache aliasing, we have to make sure that the page color of the page
 * in kernel space matches that of the virtual user address before we read
 * the page. If the page color differs, we create a temporary DTLB entry with
 * the correct page color and use this 'temporary' address as the source.
 * We then use the same approach as in clear_user_page and copy the data
 * to the kernel space and clear the PG_cache_clean bit to synchronize caches
 * later.
 *
 * Note:
 * Instead of using another 'way' for the temporary DTLB entry, we could
 * probably use the same entry that points to the kernel address (after
 * saving the original value and restoring it when we are done).
 */

void copy_user_page(void* to, void* from, unsigned long vaddr,
		    struct page* to_page)
{
	/* There shouldn't be any entries for the new page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

	spin_lock(&tlb_lock);

	if (!PAGE_COLOR_EQ(vaddr, from)) {
		unsigned long v, p, t;

		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
				      : "=a"(p), "=a"(t) : "a"(from));
		from = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
	}

	if (!PAGE_COLOR_EQ(vaddr, to)) {
		unsigned long v, p;

		p = (unsigned long)pte_val((mk_pte(to_page, PAGE_KERNEL)));
		to = (void*)PAGE_COLOR_MAP1(vaddr);
		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
	}
	copy_page(to, from);

	spin_unlock(&tlb_lock);

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &to_page->flags);
}
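The PAGE_COLOR_* macros and DTLB_WAY_DCACHE_ALIAS* constants used above are
defined elsewhere in this patch series. For orientation only, plausible shapes
(assumptions, not quoted from the patch):

/* Assumed sketch.  A page's "color" is the part of the virtual address
 * that selects the dcache slot beyond the page offset; with
 * DCACHE_WAY_SIZE == 2 * PAGE_SIZE there are two colors. */
#define PAGE_COLOR_MASK		(DCACHE_WAY_SIZE - PAGE_SIZE)
#define PAGE_COLOR(a)		((unsigned long)(a) & PAGE_COLOR_MASK)
#define PAGE_COLOR_EQ(a, b)	(PAGE_COLOR(a) == PAGE_COLOR(b))

/* Scratch virtual windows whose low bits take on the wanted color
 * (base addresses hypothetical): */
#define PAGE_COLOR_MAP0(a)	(TLBTEMP_BASE_1 + PAGE_COLOR(a))
#define PAGE_COLOR_MAP1(a)	(TLBTEMP_BASE_2 + PAGE_COLOR(a))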



/*
 * Any time the kernel writes to a user page cache page, or it is about to
 * read from a page cache page this routine is called.
 *
 * Note:
 * The kernel currently only provides one architecture bit in the page
 * flags that we use for I$/D$ coherency. Maybe, in the future, we can
 * use a separate bit for deferred dcache aliasing:
 * If the page is not mapped yet, we only need to set a flag,
 * if mapped, we need to invalidate the page.
 */
// FIXME: we probably need this for WB caches not only for Page Coloring..

void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page);

	__flush_invalidate_dcache_page_phys(addr);

	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
		__invalidate_icache_page_phys(addr);
}

void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
		       unsigned long e)
{
	__flush_invalidate_cache_all();
}

void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
		      unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Remove any entry for the old mapping. */

	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = __pa(page_address(page));
		__flush_invalidate_dcache_page_phys(addr);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_phys(addr);
	} else {
		BUG();
	}
}

#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) */


pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
{
	pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	if (likely(pte)) {
		pte_t* ptep = pte;
		int i;
		for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
			pte_clear(mm, addr, ptep);
	}
	return pte;
}

struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);

	if (likely(page)) {
		pte_t* ptep = kmap_atomic(page, KM_USER0);
		int i;

		for (i = 0; i < PTRS_PER_PTE; i++)
			pte_clear(mm, addr, ptep + i);

		kunmap_atomic(ptep, KM_USER0);
	}
	return page;
}


/*
 * Handle D$/I$ coherency.
 *
 * Note:
 * We only have one architecture bit for the page flags, so we cannot handle
 * cache aliasing, yet.
 */

void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	unsigned long vaddr = addr & PAGE_MASK;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

	/* We have a new mapping. Use it. */

	write_dtlb_entry(pte, dtlb_probe(addr));

	/* If the processor can execute from this page, synchronize D$/I$. */

	if ((vma->vm_flags & VM_EXEC) != 0) {

		write_itlb_entry(pte, itlb_probe(addr));

		/* Synchronize caches, if not clean. */

		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
			__flush_dcache_page(vaddr);
			__invalidate_icache_page(vaddr);
		}
	}
}

diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 000000000000..327c0f17187c
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,374 @@
/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

/* Note: we might want to implement some of the loops as zero-overhead loops,
 * where applicable and if supported by the processor.
 */

#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>

/* clear_page (page) */

ENTRY(clear_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE
	movi	a3, 0

1:	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	addi	a2, a2, 32
	blt	a2, a4, 1b

	retw

/*
 * copy_page (void *to, void *from)
 *               a2        a3
 */

ENTRY(copy_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE

1:	l32i	a5, a3, 0
	l32i	a6, a3, 4
	l32i	a7, a3, 8
	s32i	a5, a2, 0
	s32i	a6, a2, 4
	s32i	a7, a2, 8
	l32i	a5, a3, 12
	l32i	a6, a3, 16
	l32i	a7, a3, 20
	s32i	a5, a2, 12
	s32i	a6, a2, 16
	s32i	a7, a2, 20
	l32i	a5, a3, 24
	l32i	a6, a3, 28
	s32i	a5, a2, 24
	s32i	a6, a2, 28
	addi	a2, a2, 32
	addi	a3, a3, 32
	blt	a2, a4, 1b

	retw

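For readers not fluent in Xtensa assembly, the two routines above are unrolled
page clear/copy loops, 32 bytes per iteration. Reference C equivalents
(illustration only; the kernel uses the assembly versions):

#include <string.h>

void clear_page_ref(void *page)
{
	memset(page, 0, PAGE_SIZE);	/* eight s32i stores per loop above */
}

void copy_page_ref(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);	/* interleaved l32i/s32i pairs above */
}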

/*
 * void __flush_invalidate_cache_all(void)
 */

ENTRY(__flush_invalidate_cache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	icache_invalidate_all a2, a3
	retw

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)
	entry	sp, 16
	icache_invalidate_all a2, a3
	retw

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	retw


/*
 * void __flush_invalidate_cache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_cache_range)
	entry	sp, 16
	mov	a4, a2
	mov	a5, a3
	dcache_writeback_inv_region a4, a5, a6
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)
	entry	sp, 16
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)
	entry	sp, 16
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)
	entry	sp, 16
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)
	entry	sp, 16
	dcache_invalidate_all a2, a3
	retw

/*
 * void __flush_invalidate_dcache_page_phys(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_DCACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	diwbi	a3, 0
	bgeui	a3, 2, 1b
	retw

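The routine above walks every dcache tag and writes back/invalidates lines
belonging to the given physical page. In C it would read roughly as follows
(helper names hypothetical; tag layout assumed to be the line's physical page
in the upper bits with a valid bit in bit 0, which is what the "PAGE_MASK | 1"
comparison relies on):

void flush_invalidate_dcache_page_phys_sketch(unsigned long paddr)
{
	unsigned long want = (paddr & PAGE_MASK) | 1;	/* valid line of this page */
	long index;

	for (index = XCHAL_DCACHE_SIZE - XCHAL_DCACHE_LINESIZE;
	     index >= 0; index -= XCHAL_DCACHE_LINESIZE) {
		unsigned long tag = read_dcache_tag(index);	/* 'ldct' above */
		if ((tag & (PAGE_MASK | 1)) == want)
			dcache_line_writeback_inv(index);	/* 'diwbi' above */
	}
}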
/* The check_dcache_* routines below are debug aids: each scans one quarter
 * of the dcache tag array for a valid line of the given physical page and
 * spins (2: j 2b) when one is found; otherwise it returns.
 */

ENTRY(check_dcache_low0)
	entry	sp, 16

	movi	a3, XCHAL_DCACHE_SIZE / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	j	2b

ENTRY(check_dcache_high0)
	entry	sp, 16

	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE / 2
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b

ENTRY(check_dcache_low1)
	entry	sp, 16

	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE * 3 / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b

ENTRY(check_dcache_high1)
	entry	sp, 16

	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b


/*
 * void __invalidate_icache_page_phys(ulong start)
 */

ENTRY(__invalidate_icache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_ICACHE_LINESIZE

	lict	a6, a3
	isync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	iii	a3, 0
	bgeui	a3, 2, 1b
	retw


#if 0	/* unused index-based variants, kept for reference */

	movi	a3, XCHAL_DCACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	diwbi	a5, 0
	diwbi	a5, XCHAL_DCACHE_LINESIZE
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 2
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_DCACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
	bgez	a3, 1b

	retw

ENTRY(__invalidate_icache_page_index)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	iii	a5, 0
	iii	a5, XCHAL_ICACHE_LINESIZE
	iii	a5, XCHAL_ICACHE_LINESIZE * 2
	iii	a5, XCHAL_ICACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_ICACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
	bgez	a3, 2b

	retw

#endif

diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
new file mode 100644
index 000000000000..e5e119c820e4
--- /dev/null
+++ b/arch/xtensa/mm/pgtable.c
@@ -0,0 +1,76 @@
/*
 * arch/xtensa/mm/pgtable.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/* Allocate a group of pages and keep the one whose page color matches the
 * given address; the other pages of the group are released again.
 */

pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		struct page *page;

		for (i = 0; i < COLOR_SIZE; i++, p += PTRS_PER_PTE) {
			page = virt_to_page(p);

			set_page_count(page, 1);
			ClearPageCompound(page);

			if (ADDR_COLOR(p) == color)
				pte = p;
			else
				free_page((unsigned long)p);
		}
		if (pte)
			clear_page(pte);
	}
	return pte;
}

#ifdef PROFILING

int mask;
int hit;
int flush;

#endif

struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		for (i = 0; i < COLOR_SIZE; i++, p++) {
			set_page_count(p, 1);
			ClearPageCompound(p);

			if (PADDR_COLOR(page_address(p)) == color)
				page = p;
			else
				__free_page(p);
		}
		if (page)
			clear_highpage(page);
	}

	return page;
}

#endif

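ADDR_COLOR, PADDR_COLOR, COLOR_ORDER and COLOR_SIZE are not defined in this
patch. Plausible definitions, for orientation only (assumptions, not taken
from the source):

/* Assumed: the number of page colors is DCACHE_WAY_SIZE / PAGE_SIZE, and a
 * page's color is the low bits of its page frame number in that range. */
#define COLOR_ORDER	1	/* log2(number of colors), e.g. two colors */
#define COLOR_SIZE	(1 << COLOR_ORDER)
#define ADDR_COLOR(a)	(((unsigned long)(a) >> PAGE_SHIFT) & (COLOR_SIZE - 1))
#define PADDR_COLOR(a)	ADDR_COLOR(a)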
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 000000000000..d3bd3bfc3b3b
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,545 @@
/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/cacheflush.h>


static inline void __flush_itlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
		for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
			int entry = way + (index << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
		for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
			int entry = way + (index << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}


void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

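The entry argument built above packs the way number into the low bits and the
index above PAGE_SHIFT. A concrete instance (PAGE_SHIFT value assumed):

/* Illustration only: with PAGE_SHIFT == 12, way 2 / index 3 become */
int entry = 2 + (3 << 12);	/* == 0x3002, as consumed by the TLB ops above */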
/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
 * a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
#if 0
	printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
#endif

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		get_new_mmu_context(mm, asid_cache);
		set_rasid_register(ASID_INSERT(mm->context));
		local_irq_restore(flags);
	}
	else
		mm->context = 0;
}

void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
	       (unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

	if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while(start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while(start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->active_mm)
			set_rasid_register(ASID_INSERT(mm->context));
	}
	local_irq_restore(flags);
}

void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct* mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;
#if 0
	printk("[tlbpage<%02lx,%08lx>]\n",
	       (unsigned long)mm->context, page);
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);

#if 0
	flush_tlb_all();
	return;
#endif
}


#ifdef DEBUG_TLB

#define USE_ITLB  0
#define USE_DTLB  1

struct way_config_t {
	int indicies;
	int indicies_log2;
	int pgsz_log2;
	int arf;
};

static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
	},
	{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
	}
};

static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
	},
	{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
	  XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
	}
};

/* Total number of entries: */
#define ITLB_TOTAL_ENTRIES	\
	(XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))
#define DTLB_TOTAL_ENTRIES	\
	(XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))


typedef struct {
	unsigned	va;
	unsigned	pa;
	unsigned char	asid;
	unsigned char	ca;
	unsigned char	way;
	unsigned char	index;
	unsigned char	pgsz_log2;	/* 0 .. 32 */
	unsigned char	type;		/* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;

/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
	if (a->asid < b->asid) return -1;
	if (a->asid > b->asid) return  1;
	if (a->va < b->va) return -1;
	if (a->va > b->va) return  1;
	if (a->pa < b->pa) return -1;
	if (a->pa > b->pa) return  1;
	if (a->ca < b->ca) return -1;
	if (a->ca > b->ca) return  1;
	if (a->way < b->way) return -1;
	if (a->way > b->way) return  1;
	if (a->index < b->index) return -1;
	if (a->index > b->index) return  1;
	return 0;
}

void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
	int i, j;
	/* Simple O(n*n) sort: */
	for (i = 0; i < n-1; i++)
		for (j = i+1; j < n; j++)
			if (cmp_tlb_dump_info(t+i, t+j) > 0) {
				tlb_dump_entry_t tmp = t[i];
				t[i] = t[j];
				t[j] = tmp;
			}
}


static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];


static inline char *way_type (int type)
{
	return type ? "autorefill" : "non-autorefill";
}

void print_entry (struct way_config_t *way_info,
		  unsigned int way,
		  unsigned int index,
		  unsigned int virtual,
		  unsigned int translation)
{
	char valid_chr;
	unsigned int va, pa, asid, ca;

	va = virtual &
		~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
	asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
	pa = translation & ~((1 << way_info->pgsz_log2) - 1);
	ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
	valid_chr = asid ? 'V' : 'I';

	/* Compute and incorporate the effect of the index bits on the
	 * va.  It's more useful for kernel debugging, since we always
	 * want to know the effective va anyway. */

	va += index << way_info->pgsz_log2;

	printk ("\t[%d,%d] (%c) vpn 0x%.8x  ppn 0x%.8x  asid 0x%.2x  am 0x%x\n",
		way, index, valid_chr, va, pa, asid, ca);
}

void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_itlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_itlb_translation (way + (index << way_info->pgsz_log2)));
}

void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
		     read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
		     read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}

void dump_itlb (void)
{
	int way, index;

	printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);

	for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize(log2): %d, Type: %s\n",
			way, itlb[way].indicies,
			itlb[way].pgsz_log2, way_type(itlb[way].arf));
		for (index = 0; index < itlb[way].indicies; index++) {
			print_itlb_entry(&itlb[way], way, index);
		}
	}
}

void dump_dtlb (void)
{
	int way, index;

	printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);

	for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
		printk ("\nWay: %d, Entries: %d, MinPageSize(log2): %d, Type: %s\n",
			way, dtlb[way].indicies,
			dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
		for (index = 0; index < dtlb[way].indicies; index++) {
			print_dtlb_entry(&dtlb[way], way, index);
		}
	}
}

void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
	       int entries, int ways, int type, int show_invalid)
{
	tlb_dump_entry_t *e = tinfo;
	int way, i;

	/* Gather all info: */
	for (way = 0; way < ways; way++) {
		struct way_config_t *cfg = config + way;
		for (i = 0; i < cfg->indicies; i++) {
			unsigned wayindex = way + (i << cfg->pgsz_log2);
			unsigned vv = (type ? read_dtlb_virtual (wayindex)
				       : read_itlb_virtual (wayindex));
			unsigned pp = (type ? read_dtlb_translation (wayindex)
				       : read_itlb_translation (wayindex));

			/* Compute and incorporate the effect of the index bits on the
			 * va.  It's more useful for kernel debugging, since we always
			 * want to know the effective va anyway. */

			e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
			e->va += (i << cfg->pgsz_log2);
			e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
			e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
			e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
			e->way = way;
			e->index = i;
			e->pgsz_log2 = cfg->pgsz_log2;
			e->type = type;
			e++;
		}
	}
#if 1
	/* Sort by ASID and VADDR: */
	sort_tlb_dump_info (tinfo, entries);
#endif

	/* Display all sorted info: */
	printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
	for (e = tinfo, i = 0; i < entries; i++, e++) {
#if 0
		if (e->asid == 0 && !show_invalid)
			continue;
#endif
		printk ("%c way=%d i=%d  ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
			(e->type ? 'D' : 'I'), e->way, e->index,
			e->asid, e->va, e->pa, e->ca,
			(1 << (e->pgsz_log2 % 10)),
			" kMG"[e->pgsz_log2 / 10]
			);
	}
}

void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}

void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}

void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}


void dump_tlbs (void)
{
	dump_itlb();
	dump_dtlb();
}

void dump_cache_tag(int dcache, int idx)
{
	int w, i, s, e;
	unsigned long tag, index;
	unsigned long num_lines, num_ways, cache_size, line_size;

	num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
	cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
	line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

	num_lines = cache_size / num_ways;	/* bytes per way */

	s = 0; e = num_lines;

	if (idx >= 0)
		e = (s = idx * line_size) + 1;

	for (i = s; i < e; i += line_size) {
		printk("\nline %#08x:", i);
		for (w = 0; w < num_ways; w++) {
			index = w * num_lines + i;
			if (dcache)
				__asm__ __volatile__("ldct %0, %1\n\t"
						     : "=a"(tag) : "a"(index));
			else
				__asm__ __volatile__("lict %0, %1\n\t"
						     : "=a"(tag) : "a"(index));

			printk(" %#010lx", tag);
		}
	}
	printk ("\n");
}

void dump_icache(int index)
{
	unsigned long data, addr;
	int w, i;

	const unsigned long num_ways = XCHAL_ICACHE_WAYS;
	const unsigned long cache_size = XCHAL_ICACHE_SIZE;
	const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
	const unsigned long num_lines = cache_size / num_ways / line_size;

	for (w = 0; w < num_ways; w++) {
		printk ("\nWay %d", w);

		for (i = 0; i < line_size; i += 4) {
			/* way stride is the way size in bytes (num_lines * line_size) */
			addr = w * num_lines * line_size + index * line_size + i;
			__asm__ __volatile__("licw %0, %1\n\t"
					     : "=a"(data) : "a"(addr));
			printk(" %#010lx", data);
		}
	}
	printk ("\n");
}

void dump_cache_tags(void)
{
	printk("Instruction cache\n");
	dump_cache_tag(0, -1);
	printk("Data cache\n");
	dump_cache_tag(1, -1);
}

#endif
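
A worked instance of the "(%d %cB)" page-size pretty-printing in dump_tlb()
above, splitting pgsz_log2 into a mantissa and a k/M/G suffix:

/* pgsz_log2 == 12: 1 << (12 % 10) == 4, " kMG"[12 / 10] == 'k' -> "4 kB"
 * pgsz_log2 == 22: 1 << (22 % 10) == 4, " kMG"[22 / 10] == 'M' -> "4 MB"
 */
int mant_4k = 1 << (12 % 10);	/* 4 */
int mant_4M = 1 << (22 % 10);	/* 4 */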