author    Chen Liqin <liqin.chen@sunplusct.com>  2009-06-12 10:01:00 -0400
committer Arnd Bergmann <arnd@arndb.de>          2009-06-19 05:38:47 -0400
commit    6bc9a3966f0395419b09b2ec90f89f7f00341b37 (patch)
tree      9c0d9d5376020266f5602501c8376d4a4f13142d /arch/score/mm
parent    0732f87761dbe417cb6e084b712d07e879e876ef (diff)
score: Add support for Sunplus S+core architecture
This is the complete set of files for the new Score architecture port of Linux. The Score instruction set supports 16-bit, 32-bit and 64-bit instructions; Score SoCs have been used in game machines and LCD TVs.

Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/score/mm')
-rw-r--r--  arch/score/mm/Makefile       6
-rw-r--r--  arch/score/mm/cache.c      308
-rw-r--r--  arch/score/mm/extable.c     38
-rw-r--r--  arch/score/mm/fault.c      235
-rw-r--r--  arch/score/mm/init.c       173
-rw-r--r--  arch/score/mm/pgtable.c     60
-rw-r--r--  arch/score/mm/tlb-miss.S   199
-rw-r--r--  arch/score/mm/tlb-score.c  251
8 files changed, 1270 insertions, 0 deletions
diff --git a/arch/score/mm/Makefile b/arch/score/mm/Makefile
new file mode 100644
index 000000000000..7b1e29b1f8cd
--- /dev/null
+++ b/arch/score/mm/Makefile
@@ -0,0 +1,6 @@
#
# Makefile for the Linux/SCORE-specific parts of the memory manager.
#

obj-y += cache.o extable.o fault.o init.o \
		tlb-miss.o tlb-score.o pgtable.o
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
new file mode 100644
index 000000000000..1ebc67f18c6d
--- /dev/null
+++ b/arch/score/mm/cache.c
@@ -0,0 +1,308 @@
/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
void (*flush_cache_sigtramp)(unsigned long addr);
void (*flush_data_cache_page)(unsigned long addr);
EXPORT_SYMBOL(flush_data_cache_page);
void (*flush_icache_all)(void);

/* Score 7 cache operations */
static inline void s7___flush_cache_all(void);
static void s7_flush_cache_mm(struct mm_struct *mm);
static void s7_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
static void s7_flush_cache_page(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
static void s7_flush_icache_range(unsigned long start, unsigned long end);
static void s7_flush_cache_sigtramp(unsigned long addr);
static void s7_flush_data_cache_page(unsigned long addr);
static void s7_flush_dcache_range(unsigned long start, unsigned long end);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			s7_flush_data_cache_page(addr);
		clear_bit(PG_arch_1, &page->flags);
	}
}

static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

void __devinit cpu_cache_init(void)
{
	flush_cache_all = s7_flush_cache_all;
	__flush_cache_all = s7___flush_cache_all;
	flush_cache_mm = s7_flush_cache_mm;
	flush_cache_range = s7_flush_cache_range;
	flush_cache_page = s7_flush_cache_page;
	flush_icache_range = s7_flush_icache_range;
	flush_cache_sigtramp = s7_flush_cache_sigtramp;
	flush_data_cache_page = s7_flush_data_cache_page;

	setup_protection_map();
}

void s7_flush_icache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_icache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void s7_flush_dcache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_dcache_all\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void s7_flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void s7___flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

static void s7_flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	s7_flush_cache_all();
}

/*
 * If we flush a range precisely, the processing may take very long.
 * We must check whether each page in the range is present; if a page
 * is present, we can flush the range within that page.  Be careful:
 * the range may cross two pages, with one page present and the other
 * not present.
 */
/*
 * This interface is provided in the hope that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
static void
s7_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	pgdp = pgd_offset(mm, start);
	pudp = pud_offset(pgdp, start);
	pmdp = pmd_offset(pudp, start);
	ptep = pte_offset(pmdp, start);

	while (start <= end) {
		unsigned long tmpend;
		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE-1)) > end ?
			 end : (start | (PAGE_SIZE-1));

		s7_flush_dcache_range(start, tmpend);
		if (exec)
			s7_flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

static void
s7_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

static void s7_flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x02, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x0d, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x0d, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (addr));
}

/*
 * Just flush the entire Dcache!
 * You must ensure the page does not contain instructions, because
 * this function does not flush the Icache.
 * addr must be cache-line aligned.
 */
static void s7_flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	/* Write back and invalidate one Dcache line per iteration,
	 * covering the whole page. */
	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

/*
 * 1. Write back and invalidate a cache line of the Dcache.
 * 2. Drain the write buffer.
 * The range must be smaller than PAGE_SIZE.
 */
static void s7_flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

static void s7_flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;
	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);

	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
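
The function pointers declared at the top of cache.c are the dispatch mechanism here: cpu_cache_init() binds the generic names the rest of the kernel calls (flush_cache_all, flush_cache_range and friends) to the Score-7 "s7_" implementations, so common mm code never names a CPU variant. A minimal user-space sketch of the same indirection (every name below is hypothetical, not part of this patch):

#include <stdio.h>

/* Generic hook, bound at "boot" time -- mirrors flush_cache_all above. */
static void (*flush_cache_all_hook)(void);

/* Stand-in for the Score-7 implementation s7_flush_cache_all(). */
static void s7_flush_cache_all_demo(void)
{
	printf("flushing I+D caches the Score-7 way\n");
}

/* Mirrors cpu_cache_init(): pick the variant once, at startup. */
static void cpu_cache_init_demo(void)
{
	flush_cache_all_hook = s7_flush_cache_all_demo;
}

int main(void)
{
	cpu_cache_init_demo();
	flush_cache_all_hook();	/* generic code never names the variant */
	return 0;
}

Ports that support several cache controllers use this pattern to select one at boot without scattering #ifdefs through generic code.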
diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c
new file mode 100644
index 000000000000..01ff6445171c
--- /dev/null
+++ b/arch/score/mm/extable.c
@@ -0,0 +1,38 @@
/*
 * arch/score/mm/extable.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->cp0_epc);
	if (fixup) {
		regs->cp0_epc = fixup->fixup;
		return 1;
	}
	return 0;
}
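
fixup_exception() is the consumer side of the kernel's exception-table mechanism: each instruction that may fault while touching user memory records a (faulting instruction, fixup) address pair, and on a kernel fault the tables are searched for the instruction at regs->cp0_epc. A rough user-space model of that lookup, with made-up addresses and a linear search (the kernel's search_exception_tables() walks sorted tables the linker builds from the __ex_table section):

#include <stddef.h>
#include <stdio.h>

/* Mirrors struct exception_table_entry: faulting insn + fixup address. */
struct extable_entry {
	unsigned long insn;
	unsigned long fixup;
};

/* Hypothetical table for the demo. */
static const struct extable_entry table[] = {
	{ 0x1000, 0x2000 },
	{ 0x1008, 0x2010 },
};

static const struct extable_entry *search_demo(unsigned long epc)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].insn == epc)
			return &table[i];
	return NULL;		/* no fixup: the fault is fatal */
}

int main(void)
{
	unsigned long epc = 0x1008;	/* pretend regs->cp0_epc */
	const struct extable_entry *fixup = search_demo(epc);

	if (fixup)
		printf("resume at %#lx\n", fixup->fixup);
	return 0;
}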
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
new file mode 100644
index 000000000000..47b600e4b2c5
--- /dev/null
+++ b/arch/score/mm/fault.c
@@ -0,0 +1,235 @@
/*
 * arch/score/mm/fault.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto vmalloc_fault;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
			"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
			0, field, address, field, regs->cp0_epc,
			field, regs->regs[3]);
	die("Oops", regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
		tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
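
The vmalloc_fault path at the end of do_page_fault() is lazy synchronization: kernel mappings are created only in the master table, init_mm.pgd, and each task's top-level table copies the relevant entry the first time the task touches that region. A scale-model sketch of the idea (array size and values are invented for illustration):

#include <stdio.h>

#define PTRS_DEMO 4

/* "init_mm.pgd": the master table where kernel mappings are created. */
static unsigned long master_pgd[PTRS_DEMO] = { 0, 0xbeef, 0, 0 };
/* "pgd_current": the faulting task's top-level table, initially stale. */
static unsigned long task_pgd[PTRS_DEMO];

static int sync_pgd(unsigned long offset)
{
	if (!master_pgd[offset])
		return -1;			/* like !pgd_present(): no_context */
	task_pgd[offset] = master_pgd[offset];	/* like set_pgd(pgd, *pgd_k) */
	return 0;
}

int main(void)
{
	printf("offset 1: %s\n", sync_pgd(1) ? "oops" : "fixed up");
	printf("offset 2: %s\n", sync_pgd(2) ? "oops" : "fixed up");
	return 0;
}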
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
new file mode 100644
index 000000000000..7780eecc5a43
--- /dev/null
+++ b/arch/score/mm/init.c
@@ -0,0 +1,173 @@
/*
 * arch/score/mm/init.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <asm-generic/sections.h>

#include <asm/tlb.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.
 */
unsigned long zero_page_mask;
unsigned long empty_zero_page;
EXPORT_SYMBOL_GPL(empty_zero_page);

static struct kcore_list kcore_mem, kcore_vmalloc;

unsigned long setup_zero_pages(void)
{
	unsigned int order = 0;
	unsigned long size;
	struct page *page;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *) (empty_zero_page +
		(PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	if (pagenr >= min_low_pfn && pagenr < max_low_pfn)
		return 1;
	else
		return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram = 0;

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
	reservedpages = 0;

	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}

	num_physpages = ram;
	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *) VMALLOC_START,
			VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
			(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
			ram << (PAGE_SHIFT-10), codesize >> 10,
			reservedpages << (PAGE_SHIFT-10), datasize >> 10,
			initsize >> 10,
			(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
		virt_to_phys((void *) start),
		virt_to_phys((void *) end));
}
#endif

void __init_refok free_initmem(void)
{
	free_init_pages("unused kernel memory",
		(unsigned long)__init_begin, (unsigned long)__init_end);
}

unsigned long pgd_current;

#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until such gcc
 * versions are officially retired.
 */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
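
The comment above setup_zero_pages() mentions up to 8 zeroed pages "so we can map one of the right colour". With order 0, as here, zero_page_mask is 0 and there is a single zero page; with a larger order the mask would select the copy whose low page-index bits match the faulting user address, roughly as sketched below (the numbers are assumed, and ZERO_PAGE() itself lives in the port's headers rather than in this diff):

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 3;		/* 8 zeroed pages, the maximum mentioned */
	unsigned long size = DEMO_PAGE_SIZE << order;
	unsigned long zero_page_mask = (size - 1) & DEMO_PAGE_MASK;
	unsigned long empty_zero_page = 0x80000000UL;	/* pretend base */
	unsigned long vaddr = 0x00403000UL;		/* pretend user address */

	/* Pick the copy whose colour (low page-index bits) matches vaddr. */
	printf("zero page for %#lx is at %#lx\n",
	       vaddr, empty_zero_page + (vaddr & zero_page_mask));
	return 0;
}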
diff --git a/arch/score/mm/pgtable.c b/arch/score/mm/pgtable.c
new file mode 100644
index 000000000000..10b0962f83d4
--- /dev/null
+++ b/arch/score/mm/pgtable.c
@@ -0,0 +1,60 @@
/*
 * arch/score/mm/pgtable.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/mm.h>

void pgd_init(unsigned long page)
{
	unsigned long *p = (unsigned long *) page;
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
		p[i + 0] = (unsigned long) invalid_pte_table;
		p[i + 1] = (unsigned long) invalid_pte_table;
		p[i + 2] = (unsigned long) invalid_pte_table;
		p[i + 3] = (unsigned long) invalid_pte_table;
		p[i + 4] = (unsigned long) invalid_pte_table;
		p[i + 5] = (unsigned long) invalid_pte_table;
		p[i + 6] = (unsigned long) invalid_pte_table;
		p[i + 7] = (unsigned long) invalid_pte_table;
	}
}

void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;

	/* Initialize the entire pgd. */
	pgd_init((unsigned long) swapper_pg_dir);
	pgd_init((unsigned long) swapper_pg_dir
			+ sizeof(pgd_t) * USER_PTRS_PER_PGD);

	pgd_base = swapper_pg_dir;
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
}
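
pgd_init() unrolls its loop by eight, a cheap way to cut branch overhead on a simple in-order core, but the effect is just to point every user PGD slot at the shared invalid_pte_table so an unmapped access walks into an invalid PTE instead of random memory. Stripped of the unrolling (a user-space sketch with assumed sizes):

#include <stdio.h>

#define DEMO_USER_PTRS_PER_PGD 16	/* assumed; the real value is larger */

static unsigned long invalid_pte_table_demo[4];	/* shared all-invalid table */
static unsigned long pgd_demo[DEMO_USER_PTRS_PER_PGD];

int main(void)
{
	int i;

	/* Same effect as the unrolled loop in pgd_init(). */
	for (i = 0; i < DEMO_USER_PTRS_PER_PGD; i++)
		pgd_demo[i] = (unsigned long) invalid_pte_table_demo;

	printf("slot 0 -> %#lx\n", pgd_demo[0]);
	return 0;
}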
diff --git a/arch/score/mm/tlb-miss.S b/arch/score/mm/tlb-miss.S
new file mode 100644
index 000000000000..f27651914e8d
--- /dev/null
+++ b/arch/score/mm/tlb-miss.S
@@ -0,0 +1,199 @@
/*
 * arch/score/mm/tlb-miss.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <asm/asmmacro.h>
#include <asm/pgtable-bits.h>
#include <asm/scoreregs.h>

/*
 * After this macro runs, the pte faulted on is
 * in register PTE, a ptr into the table in which
 * the pte belongs is in PTR.
 */
	.macro	load_pte, pte, ptr
	la	\ptr, pgd_current
	lw	\ptr, [\ptr, 0]
	mfcr	\pte, cr6
	srli	\pte, \pte, 22
	slli	\pte, \pte, 2
	add	\ptr, \ptr, \pte
	lw	\ptr, [\ptr, 0]
	mfcr	\pte, cr6
	srli	\pte, \pte, 10
	andi	\pte, 0xffc
	add	\ptr, \ptr, \pte
	lw	\pte, [\ptr, 0]
	.endm

	.macro	pte_reload, ptr
	lw	\ptr, [\ptr, 0]
	mtcr	\ptr, cr12
	nop
	nop
	nop
	nop
	nop
	.endm

	.macro	do_fault, write
	SAVE_ALL
	mfcr	r6, cr6
	mv	r4, r0
	ldi	r5, \write
	la	r8, do_page_fault
	brl	r8
	j	ret_from_exception
	.endm

	.macro	pte_writable, pte, ptr, label
	andi	\pte, 0x280
	cmpi.c	\pte, 0x280
	bne	\label
	lw	\pte, [\ptr, 0]		/* reload PTE */
	.endm

/*
 * Make PTE writable, update software status bits as well,
 * then store at PTR.
 */
	.macro	pte_makewrite, pte, ptr
	ori	\pte, 0x426
	sw	\pte, [\ptr, 0]
	.endm

	.text
ENTRY(score7_FTLB_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22 */
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the start address of the PTE table */

	mfcr	r30, cr9
	andi	r30, 0xfff		/* equivalent to PTE index << 2, i.e. the byte offset */
	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */

ENTRY(score7_KSEG_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22 */
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the start address of the PTE table */

	mfcr	r30, cr6		/* get Bad VPN */
	srli	r30, r30, 10
	andi	r30, 0xffc		/* PTE VPN mask (bit 11~2) */

	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */

nopage_tlbl:
	do_fault	0		/* Read */

ENTRY(handle_tlb_refill)
	load_pte	r30, r31
	pte_writable	r30, r31, handle_tlb_refill_nopage
	pte_makewrite	r30, r31	/* Access|Modify|Dirty|Valid */
	pte_reload	r31
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_refill_nopage:
	do_fault	0		/* Read */

ENTRY(handle_tlb_invaild)
	load_pte	r30, r31
	stlb				/* find faulting entry */
	pte_writable	r30, r31, handle_tlb_invaild_nopage
	pte_makewrite	r30, r31	/* Access|Modify|Dirty|Valid */
	pte_reload	r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_invaild_nopage:
	do_fault	0		/* Read */

ENTRY(handle_mod)
	load_pte	r30, r31
	stlb				/* find faulting entry */
	andi	r30, _PAGE_WRITE	/* Writable? */
	cmpz.c	r30
	beq	nowrite_mod
	lw	r30, [r31, 0]		/* reload into r30 */

	/* Present and writable bits set, set accessed and dirty bits. */
	pte_makewrite	r30, r31

	/* Now reload the entry into the tlb. */
	pte_reload	r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte

nowrite_mod:
	do_fault	1		/* Write */
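
The index arithmetic in load_pte and score7_KSEG_refill_Handler is easier to check in C. Using the shift constants from the comments above (PGDIR_SHIFT = 22, PTE byte offset taken from bits 11..2), the two byte offsets work out as follows (a sketch; the example address is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long bad_vaddr = 0x00403234UL;	/* pretend cr6 (bad VPN) */

	/* srli 22; slli 2 -- byte offset of the PGD entry */
	unsigned long pgd_off = (bad_vaddr >> 22) << 2;
	/* srli 10; andi 0xffc -- byte offset of the PTE within its table */
	unsigned long pte_off = (bad_vaddr >> 10) & 0xffc;

	printf("pgd byte offset %#lx, pte byte offset %#lx\n",
	       pgd_off, pte_off);
	return 0;
}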
diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c
new file mode 100644
index 000000000000..4fa5aa5afecc
--- /dev/null
+++ b/arch/score/mm/tlb-score.c
@@ -0,0 +1,251 @@
/*
 * arch/score/mm/tlb-score.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#define TLBSIZE 32

unsigned long asid_cache = ASID_FIRST_VERSION;
EXPORT_SYMBOL(asid_cache);

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ASID;
	int entry;

	local_irq_save(flags);
	old_ASID = pevn_get() & ASID_MASK;
	pectx_set(0);			/* invalid */
	entry = tlblock_get();		/* skip locked entries */

	for (; entry < TLBSIZE; entry++) {
		tlbpt_set(entry);
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(old_ASID);
	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	get_new_mmu_context(mm);
	pevn_set(mm->context & ASID_MASK);
	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != 0)
		drop_mmu_context(mm);
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long vma_mm_context = mm->context;
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= TLBSIZE) {
			int oldpid = pevn_get() & ASID_MASK;
			int newpid = vma_mm_context & ASID_MASK;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				pevn_set(start | newpid);
				start += PAGE_SIZE;
				barrier();
				tlb_probe();
				idx = tlbpt_get();
				pectx_set(0);
				pevn_set(KSEG1);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			pevn_set(oldpid);
		} else {
			/* Bigger than TLBSIZE, get new ASID directly */
			get_new_mmu_context(mm);
			if (mm == current->active_mm)
				pevn_set(vma_mm_context & ASID_MASK);
		}
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= TLBSIZE) {
		int pid = pevn_get();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			long idx;

			pevn_set(start);
			start += PAGE_SIZE;
			tlb_probe();
			idx = tlbpt_get();
			if (idx < 0)
				continue;
			pectx_set(0);
			pevn_set(KSEG1);
			barrier();
			tlb_write_indexed();
		}
		pevn_set(pid);
	} else {
		local_flush_tlb_all();
	}

	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	/* Without a vma there is no mm context to read; do nothing. */
	if (vma && vma->vm_mm->context != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;
		unsigned long vma_ASID = vma->vm_mm->context;

		newpid = vma_ASID & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = pevn_get() & ASID_MASK;
		pevn_set(page | newpid);
		barrier();
		tlb_probe();
		idx = tlbpt_get();
		pectx_set(0);
		pevn_set(KSEG1);
		if (idx < 0)		/* p_bit(31) - 1: miss, 0: hit */
			goto finish;
		barrier();
		tlb_write_indexed();
finish:
		pevn_set(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = pevn_get();
	page &= (PAGE_MASK << 1);
	pevn_set(page);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(oldpid);
	local_irq_restore(flags);
}

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle the debugger faulting in on behalf of the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = pevn_get() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	pevn_set(address | pid);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(pte_val(pte));
	pevn_set(address | pid);
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	pevn_set(pid);
	local_irq_restore(flags);
}

void __cpuinit tlb_init(void)
{
	tlblock_set(0);
	local_flush_tlb_all();
	memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100),
		&score7_FTLB_refill_Handler, 0xFC);
	flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100,
		EXCEPTION_VECTOR_BASE_ADDR + 0x1FC);
}
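
asid_cache and drop_mmu_context() rely on the usual versioned-ASID scheme: the low bits of mm->context hold the hardware ASID, the upper bits a generation counter, and when the ASIDs wrap the TLB is flushed and a new generation begins. get_new_mmu_context() itself lives in asm/mmu_context.h rather than in this diff, so the following is only a sketch of that scheme with assumed constants:

#include <stdio.h>

#define DEMO_ASID_MASK		0xffUL	/* assumed hardware ASID width */
#define DEMO_FIRST_VERSION	0x100UL

static unsigned long asid_cache_demo = DEMO_FIRST_VERSION;

static unsigned long get_new_asid(void)
{
	unsigned long asid = asid_cache_demo + 1;

	if (!(asid & DEMO_ASID_MASK)) {
		/* ASIDs exhausted: a real port flushes the whole TLB here
		 * and continues in a new generation (the upper bits). */
		if (!asid)			/* counter overflowed to zero */
			asid = DEMO_FIRST_VERSION;
	}
	asid_cache_demo = asid;
	return asid;
}

int main(void)
{
	printf("first new asid: %#lx\n", get_new_asid());
	return 0;
}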