author	Richard Kuo <rkuo@codeaurora.org>	2011-10-31 19:54:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-01 10:34:20 -0400
commit	499236d9db5a349eeab2e9f2791e2d69c2e4ed53 (patch)
tree	54520c8e5aa0558a78554127fe5dd4096e5b3558
parent	a7e79840991eac8da36f437c653ee4b8cfbdafdc (diff)
Hexagon: Add page-fault support.
Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Signed-off-by: Linas Vepstas <linas@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/hexagon/include/asm/mem-layout.h	| 112
-rw-r--r--	arch/hexagon/include/asm/vm_fault.h	|  26
-rw-r--r--	arch/hexagon/mm/init.c	| 276
-rw-r--r--	arch/hexagon/mm/vm_fault.c	| 187
4 files changed, 601 insertions(+), 0 deletions(-)
diff --git a/arch/hexagon/include/asm/mem-layout.h b/arch/hexagon/include/asm/mem-layout.h
new file mode 100644
index 000000000000..72e5dcda79f5
--- /dev/null
+++ b/arch/hexagon/include/asm/mem-layout.h
@@ -0,0 +1,112 @@
/*
 * Memory layout definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_HEXAGON_MEM_LAYOUT_H
#define _ASM_HEXAGON_MEM_LAYOUT_H

#include <linux/const.h>

/*
 * Have to do this for ginormous numbers, else they get printed as
 * negative numbers, which the linker does not like when you try to
 * assign them to the location counter.
 */

#define PAGE_OFFSET	_AC(0xc0000000, UL)

/*
 * LOAD_ADDRESS is the physical/linear address at which the kernel
 * image gets loaded.  The 12 least significant bits must be zero,
 * due to limitations on setting the EVB.
 */

#ifndef LOAD_ADDRESS
#define LOAD_ADDRESS	0x00000000
#endif

#define TASK_SIZE	(PAGE_OFFSET)

/* not sure how these are used yet */
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#ifndef __ASSEMBLY__
enum fixed_addresses {
	FIX_KMAP_BEGIN,
	FIX_KMAP_END,	/* check for per-cpuism */
	__end_of_fixed_addresses
};

#define MIN_KERNEL_SEG 0x300	/* From 0xc0000000 */
extern int max_kernel_seg;

/*
 * Start of vmalloc virtual address space for kernel;
 * supposed to be based on the amount of physical memory available
 */

#define VMALLOC_START (PAGE_OFFSET + VMALLOC_OFFSET + \
	(unsigned long)high_memory)

/* Gap between physical ram and vmalloc space for guard purposes. */
#define VMALLOC_OFFSET PAGE_SIZE

/*
 * Create the space between VMALLOC_START and FIXADDR_TOP backwards
 * from the ... "top".
 *
 * Permanent IO mappings will live at 0xfexx_xxxx
 * Hypervisor occupies the last 16MB page at 0xffxx_xxxx
 */

#define FIXADDR_TOP	0xfe000000
#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

/*
 * "permanent kernel mappings", defined as long-lasting mappings of
 * high-memory page frames into the kernel address space.
 */

#define LAST_PKMAP	PTRS_PER_PTE
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

/*
 * To the "left" of the fixed map space is the kmap space
 *
 * "Permanent Kernel Mappings"; fancy (or less fancy) PTE table
 * that looks like it's actually walked.
 * Need to check the alignment/shift usage; some archs use
 * PMD_MASK on this value
 */
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE * LAST_PKMAP)

/*
 * 2 pages of guard gap between where vmalloc area ends
 * and pkmap_base begins.
 */
#define VMALLOC_END	(PKMAP_BASE - PAGE_SIZE * 2)
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_HEXAGON_MEM_LAYOUT_H */
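
The PKMAP_NR()/PKMAP_ADDR() macros above are inverses by construction. The following standalone sketch (ordinary userspace C, not part of this patch) replays the arithmetic as a sanity check; PAGE_SHIFT = 12 and PTRS_PER_PTE = 1024 are assumed here for illustration only, while a real build takes them from the Hexagon page-table headers:

#include <assert.h>
#include <stdio.h>

/* Assumed for illustration: a 4KB-page configuration. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PTE	1024

/* Mirrors the definitions in mem-layout.h above. */
#define FIXADDR_TOP	0xfe000000UL
#define __end_of_fixed_addresses	2UL
#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
#define LAST_PKMAP	PTRS_PER_PTE
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE * LAST_PKMAP)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

int main(void)
{
	unsigned long nr;

	/* Round-tripping any slot through PKMAP_ADDR/PKMAP_NR must
	 * give the slot number back. */
	for (nr = 0; nr < LAST_PKMAP; nr++)
		assert(PKMAP_NR(PKMAP_ADDR(nr)) == nr);

	printf("PKMAP_BASE    = 0x%08lx\n", PKMAP_BASE);
	printf("FIXADDR_START = 0x%08lx\n", FIXADDR_START);
	return 0;
}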
diff --git a/arch/hexagon/include/asm/vm_fault.h b/arch/hexagon/include/asm/vm_fault.h
new file mode 100644
index 000000000000..cacda36ef5d5
--- /dev/null
+++ b/arch/hexagon/include/asm/vm_fault.h
@@ -0,0 +1,26 @@
/*
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_HEXAGON_VM_FAULT_H
#define _ASM_HEXAGON_VM_FAULT_H

struct pt_regs;

extern void execute_protection_fault(struct pt_regs *);
extern void write_protection_fault(struct pt_regs *);
extern void read_protection_fault(struct pt_regs *);

#endif /* _ASM_HEXAGON_VM_FAULT_H */
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
new file mode 100644
index 000000000000..b57d741750b2
--- /dev/null
+++ b/arch/hexagon/mm/init.c
@@ -0,0 +1,276 @@
/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>

/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))

unsigned long bootmem_lastpg;	/* Should be set by platform code */

/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;

/* think this should be (page_size-1) the way it's used...*/
unsigned long zero_page_mask;

/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;

/* struct mmu_gather defined in asm-generic.h; */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;

/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over.  Actual bound in years needs to be
 * calculated to confirm.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;

/*
 * mem_init - initializes memory
 *
 * Frees up bootmem
 * Fixes up more stuff for HIGHMEM
 * Calculates and displays memory available/used
 */
void __init mem_init(void)
{
	/* No idea where this is actually declared.  Seems to evade LXR. */
	totalram_pages += free_all_bootmem();
	num_physpages = bootmem_lastpg;	/* seriously, what? */

	printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);

	/*
	 * To-Do: someone somewhere should wipe out the bootmem map
	 * after we're done?
	 */

	/*
	 * This can be moved to some more virtual-memory-specific
	 * initialization hook at some point.  Set the init_mm
	 * descriptor's "context" value to point to the initial
	 * kernel segment table's physical address.
	 */
	init_mm.context.ptbase = __pa(init_mm.pgd);
}

/*
 * free_initmem - frees memory used by stuff declared with __init
 *
 * Todo: free pages between __init_begin and __init_end; possibly
 * some devtree related stuff as well.
 */
void __init_refok free_initmem(void)
{
}

/*
 * free_initrd_mem - frees... initrd memory.
 * @start: start of initrd memory
 * @end: end of initrd memory
 *
 * Apparently has to be passed the address of the initrd memory.
 *
 * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
}

void sync_icache_dcache(pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	__vmcache_idsync(addr, PAGE_SIZE);
}

/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
void __init paging_init(void)
{
	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };

	/*
	 * This is not particularly well documented anywhere, but
	 * give ZONE_NORMAL all the memory, including the big holes
	 * left by the kernel+bootmem_map which are already left as reserved
	 * in the bootmem_map; free_area_init should see those bits and
	 * adjust accordingly.
	 */

	zones_sizes[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_sizes);	/* sets up the zonelists and mem_map */

	/*
	 * Start of high memory area.  Will probably need something more
	 * fancy if we... get more fancy.
	 */
	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}

#ifndef DMA_RESERVE
#define DMA_RESERVE		(4)
#endif

#define DMA_CHUNKSIZE		(1 << 22)
#define DMA_RESERVED_BYTES	(DMA_RESERVE * DMA_CHUNKSIZE)

/*
 * Pick out the memory size.  We look for mem=size,
 * where size is "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	unsigned long size;
	char *endp;

	size = memparse(p, &endp);

	bootmem_lastpg = PFN_DOWN(size);

	return 0;
}
early_param("mem", early_mem);

size_t hexagon_coherent_pool_size = (size_t) DMA_RESERVED_BYTES;

void __init setup_arch_memory(void)
{
	int bootmap_size;
	/* XXX Todo: this probably should be cleaned up */
	u32 *segtable = (u32 *) &swapper_pg_dir[0];
	u32 *segtable_end;

	/*
	 * Set up boot memory allocator
	 *
	 * The Gorman book also talks about these functions.
	 * This needs to change for highmem setups.
	 */

	/* Memory size needs to be a multiple of 16M */
	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
		~((BIG_KERNEL_PAGE_SIZE) - 1));

	/*
	 * Reserve the top DMA_RESERVED_BYTES of RAM for DMA (uncached)
	 * memory allocation
	 */
	bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
				    PFN_DOWN(DMA_RESERVED_BYTES));

	printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
	printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
	printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
	printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);

	/*
	 * The default VM page tables (will be) populated with
	 * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
	 * higher than what we have memory for.
	 */

	/* this is pointer arithmetic; each entry covers 4MB */
	segtable = segtable + (PAGE_OFFSET >> 22);

	/* this actually only goes to the end of the first gig */
	segtable_end = segtable + (1 << (30 - 22));

	/* Move forward to the start of empty pages */
	segtable += bootmem_lastpg >> (22 - PAGE_SHIFT);

	{
		int i;

		for (i = 1; i <= DMA_RESERVE; i++)
			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
				| __HEXAGON_C_UNC << 6
				| __HVM_PDE_S_4MB);
	}

	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
		segtable_end);
	while (segtable < (segtable_end - 8))
		*(segtable++) = __HVM_PDE_S_INVALID;
	/* stop the pointer at the device I/O 4MB page */

	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
		segtable);

#if 0
	/* Other half of the early device table from vm_init_segtable. */
	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
		(unsigned long) _K_init_devicetable - PAGE_OFFSET);
	*segtable = ((u32) (unsigned long) _K_init_devicetable - PAGE_OFFSET) |
		__HVM_PDE_S_4KB;
	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

	/*
	 * Free all the memory that wasn't taken up by the bootmap, the DMA
	 * reserve, or kernel itself.
	 */
	free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
		     PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
		     DMA_RESERVED_BYTES);

	/*
	 * The bootmem allocator seemingly just lives to feed memory
	 * to the paging system
	 */
	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
	paging_init();	/* See Gorman Book, 2.3 */

	/*
	 * At this point, the page allocator is kind of initialized, but
	 * apparently no pages are available (just like with the bootmem
	 * allocator), and need to be freed themselves via mem_init(),
	 * which is called by start_kernel() later on in the process
	 */
}
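
The segment-table pointer arithmetic in setup_arch_memory() above is terse. The following standalone sketch (plain userspace C, not kernel code) replays the same index calculations; PAGE_OFFSET and PAGE_SHIFT are taken from the headers above, and the 256MB RAM size is a made-up example:

#include <stdio.h>

/* Assumed for illustration: PAGE_OFFSET from mem-layout.h above,
 * 4KB base pages, and a hypothetical 256MB of RAM. */
#define PAGE_OFFSET	0xc0000000UL
#define PAGE_SHIFT	12

int main(void)
{
	/* Each segment-table entry (PDE) maps 4MB (1 << 22 bytes),
	 * so the first kernel entry sits at index PAGE_OFFSET >> 22. */
	unsigned long first_kernel_pde = PAGE_OFFSET >> 22;	/* 768 */

	/* Entries covering the first gigabyte of kernel VA space. */
	unsigned long gig_pdes = 1UL << (30 - 22);		/* 256 */

	/* bootmem_lastpg for 256MB of RAM, counted in 4KB pages... */
	unsigned long bootmem_lastpg = (256UL << 20) >> PAGE_SHIFT;

	/* ...regrouped into 4MB steps: pages >> (22 - PAGE_SHIFT). */
	unsigned long ram_pdes = bootmem_lastpg >> (22 - PAGE_SHIFT);

	printf("kernel map starts at PDE %lu\n", first_kernel_pde);
	printf("first gig spans %lu PDEs; %lu are RAM-backed,\n",
	       gig_pdes, ram_pdes);
	printf("so invalidation starts at PDE %lu\n",
	       first_kernel_pde + ram_pdes);
	return 0;
}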
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
new file mode 100644
index 000000000000..c10b76ff9d65
--- /dev/null
+++ b/arch/hexagon/mm/vm_fault.c
@@ -0,0 +1,187 @@
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/hardirq.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH	-1
#define FLT_LOAD	0
#define FLT_STORE	1

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int si_code = SEGV_MAPERR;
	int fault;
	const struct exception_table_entry *fixup;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		break;
	}

	/* A store (cause > 0) is the only write case; make it explicit. */
	fault = handle_mm_fault(mm, vma, address,
				(cause > 0) ? FAULT_FLAG_WRITE : 0);

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;

		up_read(&mm->mmap_sem);
		return;
	}

	up_read(&mm->mmap_sem);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/*
	 * User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		info.si_signo = SIGBUS;
		info.si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		info.si_signo = SIGSEGV;
		info.si_code = SEGV_ACCERR;
	}
	info.si_errno = 0;
	info.si_addr = (void __user *)address;
	force_sig_info(info.si_signo, &info, current);
	return;

bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}

void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}
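
To see these paths from the other side, here is a hedged userspace sketch (portable POSIX C, not part of the patch and not Hexagon-specific): a store to a PROT_NONE mapping reaches do_page_fault() as FLT_STORE, fails the VM_WRITE check in the switch above, and is delivered to the process as SIGSEGV with si_code SEGV_ACCERR:

#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

/* SIGSEGV handler: report whether the kernel said SEGV_ACCERR
 * (mapped but access forbidden) or SEGV_MAPERR (not mapped). */
static void on_segv(int sig, siginfo_t *si, void *ctx)
{
	const char *msg = (si->si_code == SEGV_ACCERR) ?
		"SEGV_ACCERR\n" : "SEGV_MAPERR\n";
	write(STDOUT_FILENO, msg, 12);
	_exit(0);
}

int main(void)
{
	struct sigaction sa;
	char *p;

	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* A mapped page with no permissions at all. */
	p = mmap(NULL, 4096, PROT_NONE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	*p = 1;		/* store -> FLT_STORE -> VM_WRITE check fails */
	return 1;	/* not reached */
}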