aboutsummaryrefslogtreecommitdiffstats
path: root/arch/unicore32
diff options
context:
space:
mode:
Diffstat (limited to 'arch/unicore32')
-rw-r--r--arch/unicore32/include/asm/mmu.h17
-rw-r--r--arch/unicore32/include/asm/mmu_context.h87
-rw-r--r--arch/unicore32/include/asm/pgalloc.h110
-rw-r--r--arch/unicore32/include/asm/pgtable-hwdef.h55
-rw-r--r--arch/unicore32/include/asm/pgtable.h317
-rw-r--r--arch/unicore32/mm/alignment.c523
-rw-r--r--arch/unicore32/mm/extable.c24
-rw-r--r--arch/unicore32/mm/fault.c479
-rw-r--r--arch/unicore32/mm/mmu.c533
-rw-r--r--arch/unicore32/mm/pgd.c102
10 files changed, 2247 insertions, 0 deletions
diff --git a/arch/unicore32/include/asm/mmu.h b/arch/unicore32/include/asm/mmu.h
new file mode 100644
index 000000000000..66fa341dc2c6
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu.h
@@ -0,0 +1,17 @@
1/*
2 * linux/arch/unicore32/include/asm/mmu.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#ifndef __UNICORE_MMU_H__
#define __UNICORE_MMU_H__

/*
 * Per-mm architecture context.  UniCore32 keeps no per-process MMU
 * state beyond a single word, so a plain unsigned long suffices.
 */
typedef unsigned long mm_context_t;

#endif /* __UNICORE_MMU_H__ */
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
new file mode 100644
index 000000000000..fb5e4c658f7a
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -0,0 +1,87 @@
1/*
2 * linux/arch/unicore32/include/asm/mmu_context.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#ifndef __UNICORE_MMU_CONTEXT_H__
#define __UNICORE_MMU_CONTEXT_H__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/cpu-single.h>

/* No per-mm hardware context needs to be created or destroyed. */
#define init_new_context(tsk, mm)	0

#define destroy_context(mm)		do { } while (0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Switch page tables only when the mm really changed, or when
	 * this CPU sees "next" for the first time (cpumask bit was clear).
	 */
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
		cpu_switch_mm(next->pgd, next);
}

#define deactivate_mm(tsk, mm)	do { } while (0)
#define activate_mm(prev, next)	switch_mm(prev, next, NULL)

/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit or the unmapping of it would cause total havoc.
 * (the macro is used as remove_vma() is static to mm/mmap.c)
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		mm->mmap_cache = NULL; \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)

static inline void arch_dup_mmap(struct mm_struct *oldmm,
	struct mm_struct *mm)
{
}

#endif
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
new file mode 100644
index 000000000000..0213e373a895
--- /dev/null
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -0,0 +1,110 @@
1/*
2 * linux/arch/unicore32/include/asm/pgalloc.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#ifndef __UNICORE_PGALLOC_H__
#define __UNICORE_PGALLOC_H__

#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define check_pgt_cache()		do { } while (0)

/* First-level (pmd) descriptor bits used when pointing at a PTE table. */
#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)

extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

/*
 * Allocate one PTE table.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
	if (pte)
		/* Push the zeroed table out of the D-cache for the walker. */
		clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));

	return pte;
}

/*
 * Allocate one PTE table for user mappings, as a struct page.
 * Returns NULL on allocation failure.
 */
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(PGALLOC_GFP, 0);
	if (pte) {
		if (!PageHighMem(pte)) {
			void *page = page_address(pte);
			clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
		}
		pgtable_page_ctor(pte);
	}

	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte)
		free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

/* Store a first-level descriptor and flush it to memory for the MMU. */
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
	set_pmd(pmdp, __pmd(pmdval));
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp,
			page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif
diff --git a/arch/unicore32/include/asm/pgtable-hwdef.h b/arch/unicore32/include/asm/pgtable-hwdef.h
new file mode 100644
index 000000000000..7314e859cca0
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable-hwdef.h
@@ -0,0 +1,55 @@
1/*
2 * linux/arch/unicore32/include/asm/pgtable-hwdef.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#ifndef __UNICORE_PGTABLE_HWDEF_H__
#define __UNICORE_PGTABLE_HWDEF_H__

/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PMD)
 *   - common
 */
#define PMD_TYPE_MASK		(3 << 0)
#define PMD_TYPE_TABLE		(0 << 0)
/*#define PMD_TYPE_LARGE	(1 << 0) */
#define PMD_TYPE_INVALID	(2 << 0)
#define PMD_TYPE_SECT		(3 << 0)

#define PMD_PRESENT		(1 << 2)
#define PMD_YOUNG		(1 << 3)

/*#define PMD_SECT_DIRTY	(1 << 4) */
#define PMD_SECT_CACHEABLE	(1 << 5)
#define PMD_SECT_EXEC		(1 << 6)
#define PMD_SECT_WRITE		(1 << 7)
#define PMD_SECT_READ		(1 << 8)

/*
 * + Level 2 descriptor (PTE)
 *   - common
 */
#define PTE_TYPE_MASK		(3 << 0)
#define PTE_TYPE_SMALL		(0 << 0)
#define PTE_TYPE_MIDDLE		(1 << 0)
#define PTE_TYPE_LARGE		(2 << 0)
#define PTE_TYPE_INVALID	(3 << 0)

#define PTE_PRESENT		(1 << 2)
/* PTE_FILE shares bit 3 with PTE_YOUNG; it is meaningful only when
 * PTE_PRESENT is clear (non-present file mapping entry). */
#define PTE_FILE		(1 << 3)	/* only when !PRESENT */
#define PTE_YOUNG		(1 << 3)
#define PTE_DIRTY		(1 << 4)
#define PTE_CACHEABLE		(1 << 5)
#define PTE_EXEC		(1 << 6)
#define PTE_WRITE		(1 << 7)
#define PTE_READ		(1 << 8)

#endif
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
new file mode 100644
index 000000000000..68b2f297ac97
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -0,0 +1,317 @@
1/*
2 * linux/arch/unicore32/include/asm/pgtable.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__

#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		SZ_8M
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) \
					& ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		(0xff000000UL)
#endif

/* Two-level page tables: 1024 entries at each level. */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PGD		1024

/*
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PGDIR_SHIFT		22

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		22
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))
76#ifndef __ASSEMBLY__
77
78/*
79 * The pgprot_* and protection_map entries will be fixed up in runtime
80 * to include the cachable bits based on memory policy, as well as any
81 * architecture dependent bits.
82 */
83#define _PTE_DEFAULT (PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)
84
85extern pgprot_t pgprot_user;
86extern pgprot_t pgprot_kernel;
87
88#define PAGE_NONE pgprot_user
89#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user | PTE_READ \
90 | PTE_WRITE)
91#define PAGE_SHARED_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
92 | PTE_WRITE \
93 | PTE_EXEC)
94#define PAGE_COPY __pgprot(pgprot_val(pgprot_user | PTE_READ)
95#define PAGE_COPY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
96 | PTE_EXEC)
97#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user | PTE_READ)
98#define PAGE_READONLY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
99 | PTE_EXEC)
100#define PAGE_KERNEL pgprot_kernel
101#define PAGE_KERNEL_EXEC __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
102
103#define __PAGE_NONE __pgprot(_PTE_DEFAULT)
104#define __PAGE_SHARED __pgprot(_PTE_DEFAULT | PTE_READ \
105 | PTE_WRITE)
106#define __PAGE_SHARED_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
107 | PTE_WRITE \
108 | PTE_EXEC)
109#define __PAGE_COPY __pgprot(_PTE_DEFAULT | PTE_READ)
110#define __PAGE_COPY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
111 | PTE_EXEC)
112#define __PAGE_READONLY __pgprot(_PTE_DEFAULT | PTE_READ)
113#define __PAGE_READONLY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
114 | PTE_EXEC)
115
116#endif /* __ASSEMBLY__ */
117
/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on UniCore hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)		(empty_zero_page)

/* PFN <-> PTE conversion; the low bits of a PTE carry protection flags. */
#define pte_pfn(pte)			(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)		(__pte(((pfn) << PAGE_SHIFT) \
						| pgprot_val(prot)))

#define pte_none(pte)			(!pte_val(pte))
#define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)			(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))

#define pte_offset_map(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)

/* PTE stores go through the CPU-specific helper (see cpu-single.h). */
#define set_pte(ptep, pte)	cpu_set_pte(ptep, pte)

#define set_pte_at(mm, addr, ptep, pteval)	\
	do {					\
		set_pte(ptep, pteval);          \
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & PTE_EXEC)
#define pte_special(pte)	(0)

/* Generate the single-bit PTE mutators (pte_mkdirty() etc.). */
#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= PTE_WRITE);
PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Mark the prot value as uncacheable.
 */
#define pgprot_noncached(prot)		\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_writecombine(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_dmacoherent(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PMD_PRESENT)
#define pmd_bad(pmd)		(((pmd_val(pmd) &		\
				(PMD_PRESENT | PMD_TYPE_MASK))	\
				!= (PMD_PRESENT | PMD_TYPE_TABLE)))

#define set_pmd(pmdpd, pmdval)		\
	do {				\
		*(pmdpd) = pmdval;	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		set_pmd(pmdp, __pmd(0));\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define pmd_page_vaddr(pmd)	((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
/*
 * NOTE(review): unlike pmd_page_vaddr(), pmd_val() is not masked with
 * PAGE_MASK here -- presumably the PTE-table address is page aligned and
 * __phys_to_pfn() drops the low bits; confirm against pgd.c/mmu.c.
 */
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/* Replace the protection bits of a PTE, keeping the PFN and state bits. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------> <--- type --> 0 0 0 0 0
 *
 * This gives us up to 127 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	5
#define __SWP_TYPE_BITS		7
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) \
				& __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << __SWP_TYPE_SHIFT) | \
				((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ----------------------> 1 0 0 0
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 4)
#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)

#define PTE_FILE_MAX_BITS	28

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma, from, pfn, size, prot)	\
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __UNICORE_PGTABLE_H__ */
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
new file mode 100644
index 000000000000..28f576d733ee
--- /dev/null
+++ b/arch/unicore32/mm/alignment.c
@@ -0,0 +1,523 @@
1/*
2 * linux/arch/unicore32/mm/alignment.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12/*
13 * TODO:
14 * FPU ldm/stm not handling
15 */
16#include <linux/compiler.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/uaccess.h>
23
24#include <asm/tlbflush.h>
25#include <asm/unaligned.h>
26
/* Top-level instruction class bits used to dispatch in do_alignment(). */
#define CODING_BITS(i)	(i & 0xe0000120)

#define LDST_P_BIT(i)	(i & (1 << 28))	/* Preindex */
#define LDST_U_BIT(i)	(i & (1 << 27))	/* Add offset */
#define LDST_W_BIT(i)	(i & (1 << 25))	/* Writeback */
#define LDST_L_BIT(i)	(i & (1 << 24))	/* Load */

/* True when the P and U bits hold the same value (LDM/STM addressing). */
#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)

#define LDSTH_I_BIT(i)	(i & (1 << 26))	/* half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 26))	/* write ASR from BSR */
#define LDM_H_BIT(i)	(i & (1 << 6))	/* select r0-r15 or r16-r31 */

#define RN_BITS(i)	((i >> 19) & 31)	/* Rn */
#define RD_BITS(i)	((i >> 14) & 31)	/* Rd */
#define RM_BITS(i)	(i & 31)	/* Rm */

/* Register list for LDM/STM; split across two fields of the encoding. */
#define REGMASK_BITS(i)	(((i & 0x7fe00) >> 3) | (i & 0x3f))
#define OFFSET_BITS(i)	(i & 0x03fff)

#define SHIFT_BITS(i)	((i >> 9) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0xc0)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x40
#define SHIFT_ASR	0x80
#define SHIFT_RORRRX	0xc0

/* Offset viewed as unsigned or signed (the latter for ASR shifts). */
union offset_union {
	unsigned long un;
	signed long sn;
};

/* Result codes returned by the per-class handlers to do_alignment(). */
#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3
#define TYPE_SWAP	4
#define TYPE_COLS	5	/* Coprocessor load/store */

/*
 * Byte accessors with exception-table fixups: on a faulting access the
 * fixup sets "err" to 1 and execution resumes after the load.  The
 * 16/32-bit wrappers below expect the caller to provide a local
 * "fault" label.
 */
#define get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.u	%1, [%2], #1\n"			\
	"2:\n"						\
	"	.pushsection .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, #1\n"			\
	"	b	2b\n"				\
	"	.popsection\n"				\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

/*
 * NOTE(review): identical to get8_unaligned_check(); a "t" (user-mode)
 * variant would normally use a user-access load -- confirm that ldb.u
 * is correct for both kernel and user accesses on this ISA.
 */
#define get8t_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.u	%1, [%2], #1\n"			\
	"2:\n"						\
	"	.pushsection .fixup,\"ax\"\n"		\
	"	.align	2\n"				\
	"3:	mov	%0, #1\n"			\
	"	b	2b\n"				\
	"	.popsection\n"				\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

/* Read a 16-bit value one byte at a time (little-endian assembly). */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8_unaligned_check(val, a, err);	\
		get8_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		if (err)				\
			goto fault;			\
	} while (0)

/* Write a 16-bit value one byte at a time, with fault fixups. */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
		__asm__(				\
		"1:	stb.u	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"2:	stb.u	%1, [%2]\n"		\
		"3:\n"					\
		"	.pushsection .fixup,\"ax\"\n"	\
		"	.align	2\n"			\
		"4:	mov	%0, #1\n"		\
		"	b	3b\n"			\
		"	.popsection\n"			\
		"	.pushsection __ex_table,\"a\"\n"\
		"	.align	3\n"			\
		"	.long	1b, 4b\n"		\
		"	.long	2b, 4b\n"		\
		"	.popsection\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
		if (err)				\
			goto fault;			\
	} while (0)

/* Write a 32-bit value byte-wise using the given store instruction. */
#define __put32_unaligned_check(ins, val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
		__asm__(				\
		"1:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"2:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"3:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"4:	"ins"	%1, [%2]\n"		\
		"5:\n"					\
		"	.pushsection .fixup,\"ax\"\n"	\
		"	.align	2\n"			\
		"6:	mov	%0, #1\n"		\
		"	b	5b\n"			\
		"	.popsection\n"			\
		"	.pushsection __ex_table,\"a\"\n"\
		"	.align	3\n"			\
		"	.long	1b, 6b\n"		\
		"	.long	2b, 6b\n"		\
		"	.long	3b, 6b\n"		\
		"	.long	4b, 6b\n"		\
		"	.popsection\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
		if (err)				\
			goto fault;			\
	} while (0)

/* Read a 32-bit value one byte at a time (little-endian assembly). */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8_unaligned_check(val, a, err);	\
		get8_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		get8_unaligned_check(v, a, err);	\
		val |= v << 16;				\
		get8_unaligned_check(v, a, err);	\
		val |= v << 24;				\
		if (err)				\
			goto fault;			\
	} while (0)

#define put32_unaligned_check(val, addr)		\
	__put32_unaligned_check("stb.u", val, addr)

#define get32t_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8t_unaligned_check(val, a, err);	\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 16;				\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 24;				\
		if (err)				\
			goto fault;			\
	} while (0)

/*
 * NOTE(review): uses the same "stb.u" as put32_unaligned_check(); see
 * the note on get8t_unaligned_check() about user-mode access variants.
 */
#define put32t_unaligned_check(val, addr)		\
	__put32_unaligned_check("stb.u", val, addr)
195
196static void
197do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
198 struct pt_regs *regs, union offset_union offset)
199{
200 if (!LDST_U_BIT(instr))
201 offset.un = -offset.un;
202
203 if (!LDST_P_BIT(instr))
204 addr += offset.un;
205
206 if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
207 regs->uregs[RN_BITS(instr)] = addr;
208}
209
/*
 * Emulate a misaligned half-word load/store; the word-SWAP instruction
 * shares enough of the encoding that it is detected and handled here too.
 */
static int
do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
			struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	/* old value 0x40002120, can't judge swap instr correctly */
	if ((instr & 0x4b003fe0) == 0x40000120)
		goto swp;

	if (LDST_L_BIT(instr)) {
		unsigned long val;
		get16_unaligned_check(val, addr);

		/* signed half-word? */
		if (instr & 0x80)
			val = (signed long)((signed short)val);

		regs->uregs[rd] = val;
	} else
		put16_unaligned_check(regs->uregs[rd], addr);

	return TYPE_LDST;

swp:
	/* only handle swap word
	 * for swap byte should not active this alignment exception */
	get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
	put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
	return TYPE_SWAP;

fault:
	/* Reached via goto from the *_unaligned_check() macros. */
	return TYPE_FAULT;
}
244
/*
 * Emulate a misaligned word load/store.  Post-indexed with writeback
 * selects the translated ("t", user-mode) access variants.
 */
static int
do_alignment_ldrstr(unsigned long addr, unsigned long instr,
			struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
		goto trans;

	if (LDST_L_BIT(instr))
		get32_unaligned_check(regs->uregs[rd], addr);
	else
		put32_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

trans:
	if (LDST_L_BIT(instr))
		get32t_unaligned_check(regs->uregs[rd], addr);
	else
		put32t_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

fault:
	/* Reached via goto from the *_unaligned_check() macros. */
	return TYPE_FAULT;
}
270
/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *	        |    | r0 | r1 |     ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr,
			struct pt_regs *regs)
{
	unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
	unsigned long eaddr, newaddr;

	/* The ASR/BSR (S bit) forms are not emulated. */
	if (LDM_S_BIT(instr))
		goto bad;

	pc_correction = 4;	/* processor implementation defined */

	/* count the number of registers in the mask to be transferred */
	nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

	rn = RN_BITS(instr);
	newaddr = eaddr = regs->uregs[rn];

	/* U bit clear: transfer region lies below the base register. */
	if (!LDST_U_BIT(instr))
		nr_regs = -nr_regs;
	newaddr += nr_regs;
	if (!LDST_U_BIT(instr))
		eaddr = newaddr;

	if (LDST_P_EQ_U(instr))	/* U = P */
		eaddr += 4;

	/*
	 * This is a "hint" - we already have eaddr worked out by the
	 * processor for us.
	 */
	if (addr != eaddr) {
		printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
			"addr = %08lx, eaddr = %08lx\n",
			instruction_pointer(regs), instr, addr, eaddr);
		show_regs(regs);
	}

	/* H bit selects the upper register bank (r16-r31). */
	if (LDM_H_BIT(instr))
		reg_correction = 0x10;
	else
		reg_correction = 0x00;

	for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
	     regbits >>= 1, rd += 1)
		if (regbits & 1) {
			if (LDST_L_BIT(instr))
				get32_unaligned_check(regs->
					uregs[rd + reg_correction], eaddr);
			else
				put32_unaligned_check(regs->
					uregs[rd + reg_correction], eaddr);
			eaddr += 4;
		}

	if (LDST_W_BIT(instr))
		regs->uregs[rn] = newaddr;
	return TYPE_DONE;

fault:
	/* Undo the PC advance made in do_alignment() before re-faulting. */
	regs->UCreg_pc -= pc_correction;
	return TYPE_FAULT;

bad:
	printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
	return TYPE_ERROR;
}
351
/*
 * Alignment fault fixup entry point (installed via hook_fault_code()).
 * Decodes the faulting instruction, dispatches to the matching handler
 * and performs any base-register writeback.  Returns 0 when the access
 * was emulated (or the fault was delivered), non-zero when unhandled.
 */
static int
do_alignment(unsigned long addr, unsigned int error_code, struct pt_regs *regs)
{
	union offset_union offset;
	unsigned long instr, instrptr;
	int (*handler) (unsigned long addr, unsigned long instr,
			struct pt_regs *regs);
	unsigned int type;

	instrptr = instruction_pointer(regs);
	/* Kernel text can be read directly; user text needs ldw.u. */
	if (instrptr >= PAGE_OFFSET)
		instr = *(unsigned long *)instrptr;
	else {
		__asm__ __volatile__(
			"ldw.u %0, [%1]\n"
			: "=&r"(instr)
			: "r"(instrptr));
	}

	/* Skip the faulting instruction; undone on the fault path below. */
	regs->UCreg_pc += 4;

	switch (CODING_BITS(instr)) {
	case 0x40000120:	/* ldrh or strh */
		if (LDSTH_I_BIT(instr))
			offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
		else
			offset.un = regs->uregs[RM_BITS(instr)];
		handler = do_alignment_ldrhstrh;
		break;

	case 0x60000000:	/* ldr or str immediate */
	case 0x60000100:	/* ldr or str immediate */
	case 0x60000020:	/* ldr or str immediate */
	case 0x60000120:	/* ldr or str immediate */
		offset.un = OFFSET_BITS(instr);
		handler = do_alignment_ldrstr;
		break;

	case 0x40000000:	/* ldr or str register */
		offset.un = regs->uregs[RM_BITS(instr)];
		{
			unsigned int shiftval = SHIFT_BITS(instr);

			switch (SHIFT_TYPE(instr)) {
			case SHIFT_LSL:
				offset.un <<= shiftval;
				break;

			case SHIFT_LSR:
				offset.un >>= shiftval;
				break;

			case SHIFT_ASR:
				offset.sn >>= shiftval;
				break;

			case SHIFT_RORRRX:
				/* shiftval == 0 encodes RRX: rotate right
				 * through the carry flag. */
				if (shiftval == 0) {
					offset.un >>= 1;
					if (regs->UCreg_asr & PSR_C_BIT)
						offset.un |= 1 << 31;
				} else
					offset.un = offset.un >> shiftval |
						offset.un << (32 - shiftval);
				break;
			}
		}
		handler = do_alignment_ldrstr;
		break;

	case 0x80000000:	/* ldm or stm */
	case 0x80000020:	/* ldm or stm */
		handler = do_alignment_ldmstm;
		break;

	default:
		goto bad;
	}

	type = handler(addr, instr, regs);

	if (type == TYPE_ERROR || type == TYPE_FAULT)
		goto bad_or_fault;

	if (type == TYPE_LDST)
		do_alignment_finish_ldst(addr, instr, regs, offset);

	return 0;

bad_or_fault:
	if (type == TYPE_ERROR)
		goto bad;
	/* Re-deliver the fault at the original PC. */
	regs->UCreg_pc -= 4;
	/*
	 * We got a fault - fix it up, or die.
	 */
	do_bad_area(addr, error_code, regs);
	return 0;

bad:
	/*
	 * Oops, we didn't handle the instruction.
	 * However, we must handle fpu instr firstly.
	 */
#ifdef CONFIG_UNICORE_FPU_F64
	/* handle co.load/store */
#define CODING_COLS 0xc0000000
#define COLS_OFFSET_BITS(i)	(i & 0x1FF)
#define COLS_L_BITS(i)		(i & (1<<24))
#define COLS_FN_BITS(i)		((i>>14) & 31)
	if ((instr & 0xe0000000) == CODING_COLS) {
		unsigned int fn = COLS_FN_BITS(instr);
		unsigned long val = 0;
		if (COLS_L_BITS(instr)) {
			get32t_unaligned_check(val, addr);
			/* Move the loaded word into FPU register Fn. */
			switch (fn) {
#define ASM_MTF(n) case n: \
	__asm__ __volatile__("MTF %0, F" __stringify(n) \
		: : "r"(val)); \
	break;
			ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
			ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
			ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
			ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
			ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
			ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
			ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
			ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
#undef ASM_MTF
			}
		} else {
			/*
			 * NOTE(review): MFF moves Fn *into* the general
			 * register, yet "val" is listed as an input
			 * constraint ("r"(val)) rather than an output
			 * ("=r"(val)) -- as written, "val" appears to stay
			 * 0.  Confirm against the UniCore FPU
			 * transfer-instruction semantics.
			 */
			switch (fn) {
#define ASM_MFF(n) case n: \
	__asm__ __volatile__("MFF %0, F" __stringify(n) \
		: : "r"(val)); \
	break;
			ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
			ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
			ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
			ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
			ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
			ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
			ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
			ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
#undef ASM_MFF
			}
			put32t_unaligned_check(val, addr);
		}
		/*
		 * NOTE(review): TYPE_COLS is non-zero; fault-hook callers
		 * conventionally treat non-zero as "not handled" -- verify
		 * the intended return value here.
		 */
		return TYPE_COLS;
	}
fault:
	return TYPE_FAULT;
#endif
	printk(KERN_ERR "Alignment trap: not handling instruction "
		"%08lx at [<%08lx>]\n", instr, instrptr);
	return 1;
}
509
510/*
511 * This needs to be done after sysctl_init, otherwise sys/ will be
512 * overwritten. Actually, this shouldn't be in sys/ at all since
513 * it isn't a sysctl, and it doesn't contain sysctl information.
514 */
515static int __init alignment_init(void)
516{
517 hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
518 "alignment exception");
519
520 return 0;
521}
522
523fs_initcall(alignment_init);
diff --git a/arch/unicore32/mm/extable.c b/arch/unicore32/mm/extable.c
new file mode 100644
index 000000000000..6564180eb285
--- /dev/null
+++ b/arch/unicore32/mm/extable.c
@@ -0,0 +1,24 @@
1/*
2 * linux/arch/unicore32/mm/extable.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/module.h>
13#include <linux/uaccess.h>
14
15int fixup_exception(struct pt_regs *regs)
16{
17 const struct exception_table_entry *fixup;
18
19 fixup = search_exception_tables(instruction_pointer(regs));
20 if (fixup)
21 regs->UCreg_pc = fixup->fixup;
22
23 return fixup != NULL;
24}
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
new file mode 100644
index 000000000000..283aa4b50b7a
--- /dev/null
+++ b/arch/unicore32/mm/fault.c
@@ -0,0 +1,479 @@
1/*
2 * linux/arch/unicore32/mm/fault.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
26
27/*
28 * Fault status register encodings. We steal bit 31 for our own purposes.
29 */
30#define FSR_LNX_PF (1 << 31)
31
/*
 * Decode the fault-status field of the FSR: the seven bits "xy abcde"
 * are folded into the table index abcde + xy.
 */
static inline int fsr_fs(unsigned int fsr)
{
	unsigned int low = fsr & 31;
	unsigned int high = (fsr >> 5) & 3;

	return low + high;
}
37
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	/* Kernel faults may arrive without an mm; fall back to init_mm. */
	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	/* do { } while (0) serves as a structured "goto done" for bail-outs. */
	do {
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		/* Only print the pmd when it is a genuinely separate level. */
		pmd = pmd_offset((pud_t *) pgd, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08lx", pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08lx", pte_val(*pte));
		pte_unmap(pte);
	} while (0);

	printk("\n");
}
88
89/*
90 * Oops. The kernel tried to access some page that wasn't present.
91 */
92static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
93 unsigned int fsr, struct pt_regs *regs)
94{
95 /*
96 * Are we prepared to handle this kernel fault?
97 */
98 if (fixup_exception(regs))
99 return;
100
101 /*
102 * No handler, we'll have to terminate things with extreme prejudice.
103 */
104 bust_spinlocks(1);
105 printk(KERN_ALERT
106 "Unable to handle kernel %s at virtual address %08lx\n",
107 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
108 "paging request", addr);
109
110 show_pte(mm, addr);
111 die("Oops", regs, fsr);
112 bust_spinlocks(0);
113 do_exit(SIGKILL);
114}
115
116/*
117 * Something tried to access memory that isn't in our memory map..
118 * User mode accesses just cause a SIGSEGV
119 */
120static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
121 unsigned int fsr, unsigned int sig, int code,
122 struct pt_regs *regs)
123{
124 struct siginfo si;
125
126 tsk->thread.address = addr;
127 tsk->thread.error_code = fsr;
128 tsk->thread.trap_no = 14;
129 si.si_signo = sig;
130 si.si_errno = 0;
131 si.si_code = code;
132 si.si_addr = (void __user *)addr;
133 force_sig_info(sig, &si, tsk);
134}
135
136void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
137{
138 struct task_struct *tsk = current;
139 struct mm_struct *mm = tsk->active_mm;
140
141 /*
142 * If we are in kernel mode at this point, we
143 * have no context to handle this fault with.
144 */
145 if (user_mode(regs))
146 __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
147 else
148 __do_kernel_fault(mm, addr, fsr, regs);
149}
150
151#define VM_FAULT_BADMAP 0x010000
152#define VM_FAULT_BADACCESS 0x020000
153
154/*
155 * Check that the permissions on the VMA allow for the fault which occurred.
156 * If we encountered a write fault, we must have write permission, otherwise
157 * we allow any permission.
158 */
159static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
160{
161 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
162
163 if (!(fsr ^ 0x12)) /* write? */
164 mask = VM_WRITE;
165 if (fsr & FSR_LNX_PF)
166 mask = VM_EXEC;
167
168 return vma->vm_flags & mask ? false : true;
169}
170
/*
 * Core of the page-fault path: resolve @addr against @mm's VMAs and ask
 * the MM layer to service the fault.  Caller must hold mm->mmap_sem.
 *
 * Returns a VM_FAULT_* code, possibly augmented with VM_FAULT_BADMAP
 * (no VMA covers the address) or VM_FAULT_BADACCESS (the VMA forbids
 * this kind of access).
 */
static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;	/* possibly a stack that can grow down */

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the fault.
	 */
	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
				(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR))
		return fault;
	/* Account the fault against the task (visible via getrusage()). */
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	return fault;

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
214
/*
 * Top-level page-fault handler.  Acquires mmap_sem (with deadlock
 * avoidance for kernel-mode faults), runs __do_pf(), and converts the
 * result into success, a user signal (SIGSEGV/SIGBUS), OOM handling,
 * or a kernel fault.  Always returns 0 (the fault is considered handled).
 */
static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		/* Kernel fault with no fixup entry would deadlock: oops now. */
		if (!user_mode(regs)
		    && !search_exception_tables(regs->UCreg_pc))
			goto no_context;
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read()
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->UCreg_pc))
			goto no_context;
#endif
	}

	fault = __do_pf(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault &
	       (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
305
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant
 * entry, we copy it to this task. If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
static int do_ifault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	/* User addresses take the regular page-fault path. */
	if (addr < TASK_SIZE)
		return do_pf(addr, fsr, regs);

	/* User mode must never fault on kernel addresses. */
	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;

	pmd_k = pmd_offset((pud_t *) pgd_k, addr);
	pmd = pmd_offset((pud_t *) pgd, addr);

	if (pmd_none(*pmd_k))
		goto bad_area;

	/* Copy the master (init_mm) entry into the active page table. */
	set_pmd(pmd, *pmd_k);
	flush_pmd_entry(pmd);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
357
/*
 * This abort handler always returns "fault".
 * It is the catch-all entry in fsr_info[] for fault codes that cannot
 * be recovered from; returning non-zero makes the dispatcher report
 * the fault (and signal/die as configured in the table).
 */
static int do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}
365
366static int do_good(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
367{
368 unsigned int res1, res2;
369
370 printk("dabt exception but no error!\n");
371
372 __asm__ __volatile__(
373 "mff %0,f0\n"
374 "mff %1,f1\n"
375 : "=r"(res1), "=r"(res2)
376 :
377 : "memory");
378
379 printk(KERN_EMERG "r0 :%08x r1 :%08x\n", res1, res2);
380 panic("shut up\n");
381 return 0;
382}
383
/*
 * Dispatch table for data/prefetch aborts, indexed by the fsr_fs()
 * decode of the fault status register.  Each entry names the handler
 * plus the signal/si_code delivered when the handler returns non-zero.
 */
static struct fsr_info {
	int (*fn) (unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
} fsr_info[] = {
	/*
	 * The following are the standard Unicore-I and UniCore-II aborts.
	 */
	{ do_good,	SIGBUS,  0,		"no error" },
	{ do_bad,	SIGBUS,  BUS_ADRALN,	"alignment exception" },
	{ do_bad,	SIGBUS,  BUS_OBJERR,	"external exception" },
	{ do_bad,	SIGBUS,  0,		"burst operation" },
	{ do_bad,	SIGBUS,  0,		"unknown 00100" },
	{ do_ifault,	SIGSEGV, SEGV_MAPERR,	"2nd level pt non-exist"},
	{ do_bad,	SIGBUS,  0,		"2nd lvl large pt non-exist" },
	{ do_bad,	SIGBUS,  0,		"invalid pte" },
	{ do_pf,	SIGSEGV, SEGV_MAPERR,	"page miss" },
	{ do_bad,	SIGBUS,  0,		"middle page miss" },
	{ do_bad,	SIGBUS,  0,		"large page miss" },
	{ do_pf,	SIGSEGV, SEGV_MAPERR,	"super page (section) miss" },
	{ do_bad,	SIGBUS,  0,		"unknown 01100" },
	{ do_bad,	SIGBUS,  0,		"unknown 01101" },
	{ do_bad,	SIGBUS,  0,		"unknown 01110" },
	{ do_bad,	SIGBUS,  0,		"unknown 01111" },
	{ do_bad,	SIGBUS,  0,		"addr: up 3G or IO" },
	{ do_pf,	SIGSEGV, SEGV_ACCERR,	"read unreadable addr" },
	{ do_pf,	SIGSEGV, SEGV_ACCERR,	"write unwriteable addr"},
	{ do_pf,	SIGSEGV, SEGV_ACCERR,	"exec unexecutable addr"},
	{ do_bad,	SIGBUS,  0,		"unknown 10100" },
	{ do_bad,	SIGBUS,  0,		"unknown 10101" },
	{ do_bad,	SIGBUS,  0,		"unknown 10110" },
	{ do_bad,	SIGBUS,  0,		"unknown 10111" },
	{ do_bad,	SIGBUS,  0,		"unknown 11000" },
	{ do_bad,	SIGBUS,  0,		"unknown 11001" },
	{ do_bad,	SIGBUS,  0,		"unknown 11010" },
	{ do_bad,	SIGBUS,  0,		"unknown 11011" },
	{ do_bad,	SIGBUS,  0,		"unknown 11100" },
	{ do_bad,	SIGBUS,  0,		"unknown 11101" },
	{ do_bad,	SIGBUS,  0,		"unknown 11110" },
	{ do_bad,	SIGBUS,  0,		"unknown 11111" }
};
426
427void __init hook_fault_code(int nr,
428 int (*fn) (unsigned long, unsigned int, struct pt_regs *),
429 int sig, int code, const char *name)
430{
431 if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
432 BUG();
433
434 fsr_info[nr].fn = fn;
435 fsr_info[nr].sig = sig;
436 fsr_info[nr].code = code;
437 fsr_info[nr].name = name;
438}
439
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
			     struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	/* A zero return means the handler resolved the fault. */
	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
	       inf->name, fsr, addr);

	/*
	 * NOTE(review): 'info' is only partially initialised here; verify
	 * that uc32_notify_die() consumers never read the remaining fields.
	 */
	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	uc32_notify_die("", regs, &info, fsr, 0);
}
461
/*
 * Dispatch a prefetch (instruction-fetch) abort.  FSR_LNX_PF is OR'd in
 * so the shared handlers can distinguish exec faults from data faults.
 */
asmlinkage void do_PrefetchAbort(unsigned long addr,
			unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(ifsr);
	struct siginfo info;

	/* A zero return means the handler resolved the fault. */
	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
	       inf->name, ifsr, addr);

	/*
	 * NOTE(review): 'info' is only partially initialised here; verify
	 * that uc32_notify_die() consumers never read the remaining fields.
	 */
	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	uc32_notify_die("", regs, &info, ifsr, 0);
}
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
new file mode 100644
index 000000000000..7bf3d588631f
--- /dev/null
+++ b/arch/unicore32/mm/mmu.c
@@ -0,0 +1,533 @@
1/*
2 * linux/arch/unicore32/mm/mmu.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/mman.h>
17#include <linux/nodemask.h>
18#include <linux/memblock.h>
19#include <linux/fs.h>
20#include <linux/bootmem.h>
21#include <linux/io.h>
22
23#include <asm/cputype.h>
24#include <asm/sections.h>
25#include <asm/setup.h>
26#include <asm/sizes.h>
27#include <asm/tlb.h>
28
29#include <mach/map.h>
30
31#include "mm.h"
32
33DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
34
35/*
36 * empty_zero_page is a special page that is used for
37 * zero-initialized data and COW.
38 */
39struct page *empty_zero_page;
40EXPORT_SYMBOL(empty_zero_page);
41
42/*
43 * The pmd table for the upper-most set of pages.
44 */
45pmd_t *top_pmd;
46
47pgprot_t pgprot_user;
48EXPORT_SYMBOL(pgprot_user);
49
50pgprot_t pgprot_kernel;
51EXPORT_SYMBOL(pgprot_kernel);
52
/*
 * Kernel command line "noalign": clear the CR_A (alignment check) bit in
 * both cached control-register copies and in the live control register.
 */
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);
61
/*
 * Update the bits selected by @mask in the CPU control register to the
 * value @set, keeping the cached cr_alignment/cr_no_alignment copies in
 * sync.  CR_A is excluded: it is owned by the "noalign" handling above.
 * Interrupts are disabled so the read-modify-write of the register and
 * its cached copies is atomic on this CPU.
 */
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
79
/* One boot-time virtual mapping request, consumed by create_mapping(). */
struct map_desc {
	unsigned long virtual;	/* virtual start address */
	unsigned long pfn;	/* physical start page-frame number */
	unsigned long length;	/* length in bytes */
	unsigned int type;	/* MT_* index into mem_types[] */
};

/* Device mappings: present/read/write but no PTE_CACHEABLE bit. */
#define PROT_PTE_DEVICE		(PTE_PRESENT | PTE_YOUNG |	\
				PTE_DIRTY | PTE_READ | PTE_WRITE)
#define PROT_SECT_DEVICE	(PMD_TYPE_SECT | PMD_PRESENT |	\
				PMD_SECT_READ | PMD_SECT_WRITE)
91
/*
 * Memory-type attribute table, indexed by the MT_* constants.  Each
 * entry supplies pte bits (prot_pte), L1 table-entry bits (prot_l1) and
 * section-entry bits (prot_sect) for create_mapping().
 */
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered */
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect	= PROT_SECT_DEVICE,
	},
	/*
	 * MT_KUSER: pte for vecpage -- cacheable,
	 * and sect for unigfx mmap -- noncacheable
	 */
	[MT_KUSER] = {
		.prot_pte	= PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
				PTE_CACHEABLE | PTE_READ | PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect	= PROT_SECT_DEVICE,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
				PTE_CACHEABLE | PTE_READ | PTE_WRITE |
				PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE | PMD_PRESENT,
	},
	[MT_MEMORY] = {
		.prot_pte	= PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
				PTE_WRITE | PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE | PMD_PRESENT,
		.prot_sect	= PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
				PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
				PMD_SECT_READ,
	},
};
126
127const struct mem_type *get_mem_type(unsigned int type)
128{
129 return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
130}
131EXPORT_SYMBOL(get_mem_type);
132
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	/* Default user-page protection: present, young, cacheable. */
	pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
	/* Kernel pages: readable, writable, executable, cacheable. */
	pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
				PTE_DIRTY | PTE_READ | PTE_WRITE |
				PTE_EXEC | PTE_CACHEABLE);
}
143
144#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
145
146static void __init *early_alloc(unsigned long sz)
147{
148 void *ptr = __va(memblock_alloc(sz, sz));
149 memset(ptr, 0, sz);
150 return ptr;
151}
152
/*
 * Return the kernel pte for @addr under @pmd, first allocating and
 * installing a fresh pte table (with L1 bits @prot) if the pmd is empty.
 */
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
	unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	/* A "bad" pmd here means the table was corrupted before we got it. */
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
163
/*
 * Populate ptes covering [addr, end) with consecutive pages starting at
 * @pfn, using the pte protection bits of @type.  The do/while runs at
 * least once: callers always pass a non-empty range.
 */
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
174
/*
 * Map [addr, end) -> @phys under @pgd, preferring full section entries
 * when everything is section-aligned and falling back to pte mappings
 * otherwise.
 */
static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		do {
			set_pmd(pmd, __pmd(phys | type->prot_sect));
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		/* Flush from the first entry written, not the last. */
		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
202
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'. We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	/* Refuse mappings in the user address range (except vectors). */
	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	/* Warn (but proceed) when a device/ROM map overlaps vmalloc space. */
	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	/* Types without prot_l1 can only be mapped with sections. */
	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
253
254static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
255
256/*
257 * vmalloc=size forces the vmalloc area to be exactly 'size'
258 * bytes. This can be used to increase (or decrease) the vmalloc
259 * area - the default is 128m.
260 */
261static int __init early_vmalloc(char *arg)
262{
263 unsigned long vmalloc_reserve = memparse(arg, NULL);
264
265 if (vmalloc_reserve < SZ_16M) {
266 vmalloc_reserve = SZ_16M;
267 printk(KERN_WARNING
268 "vmalloc area too small, limiting to %luMB\n",
269 vmalloc_reserve >> 20);
270 }
271
272 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
273 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
274 printk(KERN_WARNING
275 "vmalloc area is too big, limiting to %luMB\n",
276 vmalloc_reserve >> 20);
277 }
278
279 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
280 return 0;
281}
282early_param("vmalloc", early_vmalloc);
283
/* Highest physical address that may live in directly-mapped lowmem. */
static phys_addr_t lowmem_limit __initdata = SZ_1G;

static void __init sanity_check_meminfo(void)
{
	int i, j;

	/* Lowmem ends where the vmalloc area begins. */
	lowmem_limit = __pa(vmalloc_min - 1) + 1;
	memblock_set_current_limit(lowmem_limit);

	/*
	 * NOTE(review): this compaction pass currently keeps every bank
	 * unchanged (j tracks i exactly); it looks like the skeleton for
	 * future filtering/clipping of banks against lowmem_limit.
	 */
	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];
		j++;
	}
	meminfo.nr_banks = j;
}
300
/*
 * Wipe stale first-level entries so only the mappings this kernel
 * creates remain: everything below PAGE_OFFSET, and everything between
 * the end of the first lowmem block and VMALLOC_END.
 */
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}
330
/*
 * Reserve the special regions of memory
 */
void __init uc32_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables. These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_PUV3_UNIGFX
	/*
	 * These should likewise go elsewhere. They pre-reserve the
	 * screen/video memory region at the 48M~64M of main system memory.
	 */
	memblock_reserve(PKUNITY_UNIGFX_MMAP_BASE, PKUNITY_UNIGFX_MMAP_SIZE);
	memblock_reserve(PKUNITY_UVC_MMAP_BASE, PKUNITY_UVC_MMAP_SIZE);
#endif
}
351
/*
 * Set up the device mappings. Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function. This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(void)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	/* Clear everything above VMALLOC_END; loop ends when addr wraps to 0. */
	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Create a mapping for UniGFX VRAM
	 */
#ifdef CONFIG_PUV3_UNIGFX
	map.pfn = __phys_to_pfn(PKUNITY_UNIGFX_MMAP_BASE);
	map.virtual = KUSER_UNIGFX_BASE;
	map.length = PKUNITY_UNIGFX_MMAP_SIZE;
	map.type = MT_KUSER;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000). If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = VECTORS_BASE;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	/*
	 * Create a mapping for the kuser page at the special
	 * location (0xbfff0000) to the same vectors location.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = KUSER_VECPAGE_BASE;
	map.length = PAGE_SIZE;
	map.type = MT_KUSER;
	create_mapping(&map);

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer. This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back. After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
414
/*
 * Create MT_MEMORY mappings for every memblock region that falls below
 * lowmem_limit.
 */
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		/* Clip each region to the lowmem boundary. */
		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}
438
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 * The call order below matters: the type table and meminfo sanity
 * checks must precede page-table construction.
 */
void __init paging_init(void)
{
	void *zero_page;

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init();

	/* Cache the pmd covering the (high) vectors page. */
	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
463
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages. This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need to access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	/* Section entries: present, readable, writable. */
	base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;

	/* Overwrite each user pgd slot with an identity section mapping. */
	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}
495
496/*
497 * Take care of architecture specific things when placing a new PTE into
498 * a page table, or changing an existing PTE. Basically, there are two
499 * things that we need to take care of:
500 *
501 * 1. If PG_dcache_clean is not set for the page, we need to ensure
502 * that any cache entries for the kernels virtual memory
503 * range are written back to the page.
504 * 2. If we have multiple shared mappings of the same space in
505 * an object, we need to deal with the cache aliasing issues.
506 *
507 * Note that the pte lock will be held.
508 */
509void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
510 pte_t *ptep)
511{
512 unsigned long pfn = pte_pfn(*ptep);
513 struct address_space *mapping;
514 struct page *page;
515
516 if (!pfn_valid(pfn))
517 return;
518
519 /*
520 * The zero page is never written to, so never has any dirty
521 * cache lines, and therefore never needs to be flushed.
522 */
523 page = pfn_to_page(pfn);
524 if (page == ZERO_PAGE(0))
525 return;
526
527 mapping = page_mapping(page);
528 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
529 __flush_dcache_page(mapping, page);
530 if (mapping)
531 if (vma->vm_flags & VM_EXEC)
532 __flush_icache_all();
533}
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
new file mode 100644
index 000000000000..632cef7cd378
--- /dev/null
+++ b/arch/unicore32/mm/pgd.c
@@ -0,0 +1,102 @@
1/*
2 * linux/arch/unicore32/mm/pgd.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/mm.h>
13#include <linux/gfp.h>
14#include <linux/highmem.h>
15
16#include <asm/pgalloc.h>
17#include <asm/page.h>
18#include <asm/tlbflush.h>
19
20#include "mm.h"
21
/* First pgd slot that belongs to kernel space rather than user space. */
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 4k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
	if (!new_pgd)
		goto no_pgd;

	/* Clear the user slots; the kernel slots are copied below. */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On UniCore, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* Copy the vector pte from init_mm into the new table. */
		init_pmd = pmd_offset((pud_t *)init_pgd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		/* Unmap in LIFO order: init_pte was mapped after new_pte. */
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 0);
no_pgd:
	return NULL;
}
77
/*
 * Release a pgd allocated by get_pgd_slow(), including the first-page
 * pmd/pte pair that get_pgd_slow() may have set up for the vectors.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		/* Corrupted entry: report it, drop it, and free the pgd. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long) pgd, 0);
}