diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm26/mm |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm26/mm')
-rw-r--r-- | arch/arm26/mm/Makefile | 6 | ||||
-rw-r--r-- | arch/arm26/mm/extable.c | 25 | ||||
-rw-r--r-- | arch/arm26/mm/fault.c | 318 | ||||
-rw-r--r-- | arch/arm26/mm/fault.h | 5 | ||||
-rw-r--r-- | arch/arm26/mm/init.c | 412 | ||||
-rw-r--r-- | arch/arm26/mm/memc.c | 202 | ||||
-rw-r--r-- | arch/arm26/mm/proc-funcs.S | 359 | ||||
-rw-r--r-- | arch/arm26/mm/small_page.c | 194 |
8 files changed, 1521 insertions, 0 deletions
diff --git a/arch/arm26/mm/Makefile b/arch/arm26/mm/Makefile new file mode 100644 index 000000000000..a8fb166d5c6d --- /dev/null +++ b/arch/arm26/mm/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # | ||
2 | # Makefile for the linux arm26-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o extable.o proc-funcs.o memc.o fault.o \ | ||
6 | small_page.o | ||
diff --git a/arch/arm26/mm/extable.c b/arch/arm26/mm/extable.c new file mode 100644 index 000000000000..2d9f5b5a78d6 --- /dev/null +++ b/arch/arm26/mm/extable.c | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/extable.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <asm/uaccess.h> | ||
8 | |||
9 | int fixup_exception(struct pt_regs *regs) | ||
10 | { | ||
11 | const struct exception_table_entry *fixup; | ||
12 | |||
13 | fixup = search_exception_tables(instruction_pointer(regs)); | ||
14 | |||
15 | /* | ||
16 | * The kernel runs in SVC mode - make sure we keep running in SVC mode | ||
17 | * by frobbing the PSR appropriately (PSR and PC are in the same reg. | ||
18 | * on ARM26) | ||
19 | */ | ||
20 | if (fixup) | ||
21 | regs->ARM_pc = fixup->fixup | PSR_I_BIT | MODE_SVC26; | ||
22 | |||
23 | return fixup != NULL; | ||
24 | } | ||
25 | |||
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c new file mode 100644 index 000000000000..dacca8bb7744 --- /dev/null +++ b/arch/arm26/mm/fault.c | |||
@@ -0,0 +1,318 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Modifications for ARM processor (c) 1995-2001 Russell King | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/config.h> | ||
12 | #include <linux/signal.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/mman.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/init.h> | ||
24 | |||
25 | #include <asm/system.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/uaccess.h> //FIXME this header may be bogusly included | ||
28 | |||
29 | #include "fault.h" | ||
30 | |||
31 | #define FAULT_CODE_LDRSTRPOST 0x80 | ||
32 | #define FAULT_CODE_LDRSTRPRE 0x40 | ||
33 | #define FAULT_CODE_LDRSTRREG 0x20 | ||
34 | #define FAULT_CODE_LDMSTM 0x10 | ||
35 | #define FAULT_CODE_LDCSTC 0x08 | ||
36 | #define FAULT_CODE_PREFETCH 0x04 | ||
37 | #define FAULT_CODE_WRITE 0x02 | ||
38 | #define FAULT_CODE_FORCECOW 0x01 | ||
39 | |||
40 | #define DO_COW(m) ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW)) | ||
41 | #define READ_FAULT(m) (!((m) & FAULT_CODE_WRITE)) | ||
42 | #define DEBUG | ||
43 | /* | ||
44 | * This is useful to dump out the page tables associated with | ||
45 | * 'addr' in mm 'mm'. | ||
46 | */ | ||
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.  Falls back to init_mm when mm is NULL.
 * Output goes to the console at KERN_ALERT level (oops path).
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	/* do { } while (0) is used only so 'break' can abandon the walk */
	do {
		pmd_t *pmd;
		pte_t *pte;

		pmd = pmd_offset(pgd, addr);

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		/* FIXME */
		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08lx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
81 | |||
82 | /* | ||
83 | * Oops. The kernel tried to access some page that wasn't present. | ||
84 | */ | ||
/*
 * Oops. The kernel tried to access some page that wasn't present.
 * Try the exception tables first; if no fixup exists, dump state
 * and kill the current task with SIGKILL.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
109 | |||
110 | /* | ||
111 | * Something tried to access memory that isn't in our memory map.. | ||
112 | * User mode accesses just cause a SIGSEGV | ||
113 | */ | ||
/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV.
 * 'code' is the siginfo si_code (SEGV_ACCERR or SEGV_MAPERR, chosen
 * by the caller).
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, int code, struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	printk("%s: unhandled page fault at 0x%08lx, code 0x%03x\n",
	       tsk->comm, addr, fsr);
	show_pte(tsk->mm, addr);
	show_regs(regs);
	//dump_backtrace(regs, tsk); // FIXME ARM32 dropped this - why?
	while(1); //FIXME - hack to stop debug going nutso - NB: spins here forever
#endif

	/* Record the fault details in the thread structure, then signal. */
	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void *)addr;
	force_sig_info(SIGSEGV, &si, tsk);
}
138 | |||
/*
 * Try to resolve a fault against the task's address space.
 *
 * Return value protocol (consumed by do_page_fault):
 *   > 0  success (1 = minor fault, 2 = major fault)
 *   0    handle_mm_fault() failed -> caller raises SIGBUS
 *  -1    access type not permitted by the vma
 *  -2    no vma covers the address
 *  -3    out of memory
 *
 * Called by do_page_fault() with mm->mmap_sem held for read.
 */
static int
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault, mask;

	vma = find_vma(mm, addr);
	fault = -2; /* bad map area */
	if (!vma)
		goto out;
	if (vma->vm_start > addr)
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (READ_FAULT(fsr)) /* read? */
		mask = VM_READ|VM_EXEC;
	else
		mask = VM_WRITE;

	fault = -1; /* bad access type */
	if (!(vma->vm_flags & mask))
		goto out;

	/*
	 * If for any reason at all we couldn't handle
	 * the fault, make sure we exit gracefully rather
	 * than endlessly redo the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));

	/*
	 * Handle the "normal" cases first - successful and sigbus
	 */
	switch (fault) {
	case 2:
		tsk->maj_flt++;
		return fault;
	case 1:
		tsk->min_flt++;
		/* fallthrough */
	case 0:
		return fault;
	}

	fault = -3; /* out of memory */
	if (tsk->pid != 1)
		goto out;

	/*
	 * If we are out of memory for pid1,
	 * sleep for a while and retry
	 */
	yield();
	goto survive;

check_stack:
	/* Address below the vma: maybe a stack access - try to grow it. */
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
205 | |||
/*
 * Top-level page fault handler, called from the abort handlers.
 * Dispatches on the result of __do_page_fault():
 *   fault > 0  -> handled, return
 *   fault == 0 -> SIGBUS
 *   fault < 0  -> SIGSEGV (user) / kernel fault path / OOM kill
 */
int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	fault = __do_page_fault(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first
	 */
	if (fault > 0)
		return 0;

	/*
	 * We had some memory, but were unable to
	 * successfully fix up this page fault.
	 */
	if (fault == 0){
		goto do_sigbus;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 * FIXME - is this test right?
	 */
	if (!user_mode(regs)){
		goto no_context;
	}

	if (fault == -3) {
		/*
		 * We ran out of memory, or some other thing happened to
		 * us that made us unable to handle the page fault gracefully.
		 */
		printk("VM: killing process %s\n", tsk->comm);
		do_exit(SIGKILL);
	}
	else{
		/* -1 means bad access type, -2 means no mapping at all */
		__do_user_fault(tsk, addr, fsr, fault == -1 ? SEGV_ACCERR : SEGV_MAPERR, regs);
	}

	return 0;


/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = addr; //FIXME - need other bits setting?
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);
#ifdef CONFIG_DEBUG_USER
	printk(KERN_DEBUG "%s: sigbus at 0x%08lx, pc=0x%08lx\n",
		current->comm, addr, instruction_pointer(regs));
#endif

	/* Kernel mode? Handle exceptions or die */
	if (user_mode(regs))
		return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
290 | |||
291 | /* | ||
292 | * Handle a data abort. Note that we have to handle a range of addresses | ||
293 | * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force | ||
294 | * a copy-on-write. However, on the second page, we always force COW. | ||
295 | */ | ||
/*
 * Handle a data abort.  Note that we have to handle a range of addresses
 * on ARM2/3 for ldm.  If both pages are zero-mapped, then we have to force
 * a copy-on-write.  However, on the second page, we always force COW.
 */
asmlinkage void
do_DataAbort(unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
{
	do_page_fault(min_addr, mode, regs);

	/* Access spanned a page boundary: fault in the second page too. */
	if ((min_addr ^ max_addr) >> PAGE_SHIFT){
		do_page_fault(max_addr, mode | FAULT_CODE_FORCECOW, regs);
	}
}
305 | |||
/*
 * Handle a prefetch (instruction fetch) abort by faulting the page in.
 * Always returns 1 (the disabled block below sketches a possible early
 * return for already-mapped pages).
 */
asmlinkage int
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
#if 0
	if (the memc mapping for this page exists) {
		printk ("Page in, but got abort (undefined instruction?)\n");
		return 0;
	}
#endif
	do_page_fault(addr, FAULT_CODE_PREFETCH, regs);
	return 1;
}
318 | |||
diff --git a/arch/arm26/mm/fault.h b/arch/arm26/mm/fault.h new file mode 100644 index 000000000000..4442d00d86ac --- /dev/null +++ b/arch/arm26/mm/fault.h | |||
@@ -0,0 +1,5 @@ | |||
1 | void show_pte(struct mm_struct *mm, unsigned long addr); | ||
2 | |||
3 | int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | ||
4 | |||
5 | unsigned long search_extable(unsigned long addr); //FIXME - is it right? | ||
diff --git a/arch/arm26/mm/init.c b/arch/arm26/mm/init.c new file mode 100644 index 000000000000..1f09a9d0fb83 --- /dev/null +++ b/arch/arm26/mm/init.c | |||
@@ -0,0 +1,412 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2002 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/mman.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/swap.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/initrd.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | #include <linux/blkdev.h> | ||
26 | |||
27 | #include <asm/segment.h> | ||
28 | #include <asm/mach-types.h> | ||
29 | #include <asm/dma.h> | ||
30 | #include <asm/hardware.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/tlb.h> | ||
33 | |||
34 | #include <asm/map.h> | ||
35 | |||
36 | |||
/* Size in bytes of one last-level page table.  (The expression must be
 * fully parenthesised; the unbalanced form `PTRS_PER_PTE * sizeof(pte_t))'
 * is a syntax error at any use site.) */
#define TABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
38 | |||
39 | struct mmu_gather mmu_gathers[NR_CPUS]; | ||
40 | |||
41 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
42 | extern char _stext, _text, _etext, _end, __init_begin, __init_end; | ||
43 | #ifdef CONFIG_XIP_KERNEL | ||
44 | extern char _endtext, _sdata; | ||
45 | #endif | ||
46 | extern unsigned long phys_initrd_start; | ||
47 | extern unsigned long phys_initrd_size; | ||
48 | |||
49 | /* | ||
50 | * The sole use of this is to pass memory configuration | ||
51 | * data from paging_init to mem_init. | ||
52 | */ | ||
53 | static struct meminfo meminfo __initdata = { 0, }; | ||
54 | |||
55 | /* | ||
56 | * empty_zero_page is a special page that is used for | ||
57 | * zero-initialized data and COW. | ||
58 | */ | ||
59 | struct page *empty_zero_page; | ||
60 | |||
/*
 * Print a memory usage summary: free areas, swap, and a classification
 * (reserved / swap-cached / slab / free / shared) of every page in
 * node 0's mem_map.
 */
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0;
	struct page *page, *end;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));


	/* ARM26 has a single node, so node 0 covers all of RAM. */
	page = NODE_MEM_MAP(0);
	end = page + NODE_DATA(0)->node_spanned_pages;

	do {
		total++;
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
			cached++;
		else if (PageSlab(page))
			slab++;
		else if (!page_count(page))
			free++;
		else
			/* page_count includes our implicit reference */
			shared += page_count(page) - 1;
		page++;
	} while (page < end);

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
97 | |||
98 | struct node_info { | ||
99 | unsigned int start; | ||
100 | unsigned int end; | ||
101 | int bootmap_pages; | ||
102 | }; | ||
103 | |||
104 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
105 | #define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) | ||
106 | #define PFN_SIZE(x) ((x) >> PAGE_SHIFT) | ||
107 | #define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ | ||
108 | (((unsigned long)(s)) & PAGE_MASK)) | ||
109 | |||
110 | /* | ||
111 | * FIXME: We really want to avoid allocating the bootmap bitmap | ||
112 | * over the top of the initrd. Hopefully, this is located towards | ||
113 | * the start of a bank, so if we allocate the bootmap bitmap at | ||
114 | * the end, we won't clash. | ||
115 | */ | ||
116 | static unsigned int __init | ||
117 | find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages) | ||
118 | { | ||
119 | unsigned int start_pfn, bootmap_pfn; | ||
120 | unsigned int start, end; | ||
121 | |||
122 | start_pfn = PFN_UP((unsigned long)&_end); | ||
123 | bootmap_pfn = 0; | ||
124 | |||
125 | /* ARM26 machines only have one node */ | ||
126 | if (mi->bank->node != 0) | ||
127 | BUG(); | ||
128 | |||
129 | start = PFN_UP(mi->bank->start); | ||
130 | end = PFN_DOWN(mi->bank->size + mi->bank->start); | ||
131 | |||
132 | if (start < start_pfn) | ||
133 | start = start_pfn; | ||
134 | |||
135 | if (end <= start) | ||
136 | BUG(); | ||
137 | |||
138 | if (end - start >= bootmap_pages) | ||
139 | bootmap_pfn = start; | ||
140 | else | ||
141 | BUG(); | ||
142 | |||
143 | return bootmap_pfn; | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * Scan the memory info structure and pull out: | ||
148 | * - the end of memory | ||
149 | * - the number of nodes | ||
150 | * - the pfn range of each node | ||
151 | * - the number of bootmem bitmap pages | ||
152 | */ | ||
/*
 * Scan the memory info structure and pull out:
 *  - the end of memory
 *  - the number of nodes
 *  - the pfn range of each node
 *  - the number of bootmem bitmap pages
 *
 * Fills in *np and sets max_low_pfn/max_pfn and mi->end as side effects.
 */
static void __init
find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
{
	unsigned int memend_pfn = 0;

	nodes_clear(node_online_map);
	node_set_online(0);

	np->bootmap_pages = 0;

	if (mi->bank->size == 0) {
		BUG();
	}

	/*
	 * Get the start and end pfns for this bank
	 */
	np->start = PFN_UP(mi->bank->start);
	np->end = PFN_DOWN(mi->bank->start + mi->bank->size);

	/* Only one bank, so memend_pfn always ends up equal to np->end. */
	if (memend_pfn < np->end)
		memend_pfn = np->end;

	/*
	 * Calculate the number of pages we require to
	 * store the bootmem bitmaps.
	 */
	np->bootmap_pages = bootmem_bootmap_pages(np->end - np->start);

	/*
	 * This doesn't seem to be used by the Linux memory
	 * manager any more. If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	max_low_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET);
	max_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET);
	mi->end = memend_pfn << PAGE_SHIFT;

}
192 | |||
193 | /* | ||
194 | * Initialise the bootmem allocator for all nodes. This is called | ||
195 | * early during the architecture specific initialisation. | ||
196 | */ | ||
/*
 * Initialise the bootmem allocator for all nodes. This is called
 * early during the architecture specific initialisation.
 *
 * Order matters: all RAM is first handed to bootmem, then the
 * regions that must not be allocated (kernel image, the bitmap
 * itself, screen RAM, initrd) are reserved back out.
 */
void __init bootmem_init(struct meminfo *mi)
{
	struct node_info node_info;
	unsigned int bootmap_pfn;
	pg_data_t *pgdat = NODE_DATA(0);

	find_memend_and_nodes(mi, &node_info);

	bootmap_pfn = find_bootmap_pfn(mi, node_info.bootmap_pages);

	/*
	 * Note that node 0 must always have some pages.
	 */
	if (node_info.end == 0)
		BUG();

	/*
	 * Initialise the bootmem allocator.
	 */
	init_bootmem_node(pgdat, bootmap_pfn, node_info.start, node_info.end);

	/*
	 * Register all available RAM in this node with the bootmem allocator.
	 */
	free_bootmem_node(pgdat, mi->bank->start, mi->bank->size);

	/*
	 * Register the kernel text and data with bootmem.
	 * Note: with XIP we dont register .text since
	 * its in ROM.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&_sdata), &_end - &_sdata);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * And don't forget to reserve the allocator bitmap,
	 * which will be freed later.
	 */
	reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
			     node_info.bootmap_pages << PAGE_SHIFT);

	/*
	 * These should likewise go elsewhere. They pre-reserve
	 * the screen memory region at the start of main system
	 * memory. FIXME - screen RAM is not 512K!
	 */
	reserve_bootmem_node(pgdat, 0x02000000, 0x00080000);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = phys_initrd_start;
	initrd_end = initrd_start + phys_initrd_size;

	/* Archimedes machines only have one node, so initrd is in node 0 */
#ifdef CONFIG_XIP_KERNEL
	/* Only reserve initrd space if it is in RAM */
	if(initrd_start && initrd_start < 0x03000000){
#else
	if(initrd_start){
#endif
		reserve_bootmem_node(pgdat, __pa(initrd_start),
				     initrd_end - initrd_start);
	}
#endif /* CONFIG_BLK_DEV_INITRD */


}
266 | |||
267 | /* | ||
268 | * paging_init() sets up the page tables, initialises the zone memory | ||
269 | * maps, and sets up the zero page, bad page and bad page tables. | ||
270 | */ | ||
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi)
{
	void *zero_page;
	unsigned long zone_size[MAX_NR_ZONES];
	unsigned long zhole_size[MAX_NR_ZONES];
	struct bootmem_data *bdata;
	pg_data_t *pgdat;
	int i;

	/* Stash the memory layout for mem_init() (see 'meminfo' above). */
	memcpy(&meminfo, mi, sizeof(meminfo));

	/*
	 * allocate the zero page. Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);

	/*
	 * initialise the page tables.
	 */
	memtable_init(mi);
	flush_tlb_all();

	/*
	 * initialise the zones in node 0 (archimedes have only 1 node)
	 */

	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone_size[i] = 0;
		zhole_size[i] = 0;
	}

	/* Everything lives in zone 0; there are no holes. */
	pgdat = NODE_DATA(0);
	bdata = pgdat->bdata;
	zone_size[0] = bdata->node_low_pfn -
			(bdata->node_boot_start >> PAGE_SHIFT);
	if (!zone_size[0])
		BUG();
	pgdat->node_mem_map = NULL;
	free_area_init_node(0, pgdat, zone_size,
			bdata->node_boot_start >> PAGE_SHIFT, zhole_size);

	/*
	 * finish off the bad pages once
	 * the mem_map is initialised
	 */
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
}
319 | |||
/*
 * Return the pages in [addr, end) to the page allocator, bumping
 * totalram_pages, and log how much was freed.  's' names the region
 * for the message; pass NULL to free silently.
 */
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
	unsigned int size = (end - addr) >> 10;	/* size in KiB, for the printk */

	for (; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);
		ClearPageReserved(page);
		/* refcount 1 so the following free_page() actually releases it */
		set_page_count(page, 1);
		free_page(addr);
		totalram_pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}
335 | |||
336 | /* | ||
337 | * mem_init() marks the free areas in the mem_map and tells us how much | ||
338 | * memory is free. This is done after various parts of the system have | ||
339 | * claimed their memory after the kernel image. | ||
340 | */ | ||
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned int codepages, datapages, initpages;
	pg_data_t *pgdat = NODE_DATA(0);
	extern int sysctl_overcommit_memory;


	/* Note: data pages includes BSS */
#ifdef CONFIG_XIP_KERNEL
	/* XIP: text is in ROM, so only count RAM-resident sections. */
	codepages = &_endtext - &_text;
	datapages = &_end - &_sdata;
#else
	codepages = &_etext - &_text;
	datapages = &_end - &_etext;
#endif
	initpages = &__init_end - &__init_begin;

	/* meminfo.end was recorded by find_memend_and_nodes()/paging_init() */
	high_memory = (void *)__va(meminfo.end);
	max_mapnr = virt_to_page(high_memory) - mem_map;

	/* this will put all unused low memory onto the freelists */
	if (pgdat->node_spanned_pages != 0)
		totalram_pages += free_all_bootmem_node(pgdat);

	num_physpages = meminfo.bank[0].size >> PAGE_SHIFT;

	printk(KERN_INFO "Memory: %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
		"%dK data, %dK init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		codepages >> 10, datapages >> 10, initpages >> 10);

	/*
	 * Turn on overcommit on tiny machines
	 */
	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
		printk("Turning on overcommit\n");
	}
}
381 | |||
/*
 * Free the __init sections once boot is complete.  With an XIP kernel
 * nothing is freed here (the init code sits in ROM).
 */
void free_initmem(void){
#ifndef CONFIG_XIP_KERNEL
	free_area((unsigned long)(&__init_begin),
		  (unsigned long)(&__init_end),
		  "init");
#endif
}
389 | |||
390 | #ifdef CONFIG_BLK_DEV_INITRD | ||
391 | |||
392 | static int keep_initrd; | ||
393 | |||
/*
 * Free the memory occupied by the initrd, unless the user asked to
 * keep it ("keepinitrd" on the command line) or, on XIP kernels, it
 * lives outside RAM.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_XIP_KERNEL
	/* Only bin initrd if it is in RAM... */
	if(!keep_initrd && start < 0x03000000)
#else
	if (!keep_initrd)
#endif
		free_area(start, end, "initrd");
}
404 | |||
/* "keepinitrd" kernel command-line option: keep initrd memory after boot
 * (checked in free_initrd_mem above). */
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
412 | #endif | ||
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c new file mode 100644 index 000000000000..8e8a2bb2487d --- /dev/null +++ b/arch/arm26/mm/memc.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/memc.c | ||
3 | * | ||
4 | * Copyright (C) 1998-2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Page table sludge for older ARM processor architectures. | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | |||
17 | #include <asm/pgtable.h> | ||
18 | #include <asm/pgalloc.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/memory.h> | ||
21 | #include <asm/hardware.h> | ||
22 | |||
23 | #include <asm/map.h> | ||
24 | |||
25 | #define MEMC_TABLE_SIZE (256*sizeof(unsigned long)) | ||
26 | |||
27 | kmem_cache_t *pte_cache, *pgd_cache; | ||
28 | int page_nr; | ||
29 | |||
30 | /* | ||
31 | * Allocate space for a page table and a MEMC table. | ||
32 | * Note that we place the MEMC | ||
33 | * table before the page directory. This means we can | ||
34 | * easily get to both tightly-associated data structures | ||
35 | * with a single pointer. | ||
36 | */ | ||
37 | static inline pgd_t *alloc_pgd_table(void) | ||
38 | { | ||
39 | void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL); | ||
40 | |||
41 | if (pg2k) | ||
42 | pg2k += MEMC_TABLE_SIZE; | ||
43 | |||
44 | return (pgd_t *)pg2k; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Free a page table. this function is the counterpart to get_pgd_slow | ||
49 | * below, not alloc_pgd_table above. | ||
50 | */ | ||
51 | void free_pgd_slow(pgd_t *pgd) | ||
52 | { | ||
53 | unsigned long tbl = (unsigned long)pgd; | ||
54 | |||
55 | tbl -= MEMC_TABLE_SIZE; | ||
56 | |||
57 | kmem_cache_free(pgd_cache, (void *)tbl); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Allocate a new pgd and fill it in ready for use | ||
62 | * | ||
63 | * A new tasks pgd is completely empty (all pages !present) except for: | ||
64 | * | ||
65 | * o The machine vectors at virtual address 0x0 | ||
66 | * o The vmalloc region at the top of address space | ||
67 | * | ||
68 | */ | ||
69 | #define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) | ||
70 | |||
/*
 * Allocate a new pgd and fill it in ready for use.  Returns NULL on
 * any allocation failure (partially built tables are torn down via the
 * goto chain at the bottom).
 *
 * A new task's pgd is completely empty (all pages !present) except for:
 *  o the machine vectors at virtual address 0x0 (copied from init_mm)
 *  o the vmalloc region at the top of address space (copied from init_mm)
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 * FIXME: I bet we could avoid taking it pretty much altogether
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_kernel(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	/* Copy the vector-page pte from the init task's tables. */
	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * the page table entries are zeroed
	 * when the table is created. (see the cache_ctor functions below)
	 * Now we need to plonk the kernel (vmalloc) area at the end of
	 * the address space. We copy this from the init thread, just like
	 * the init_pte we copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:	/* pte allocation failed: undo pmd + pgd */
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:	/* pmd allocation failed: undo pgd */
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
135 | |||
136 | /* | ||
137 | * No special code is required here. | ||
138 | */ | ||
/*
 * No special code is required here to prepare the MM for reboot;
 * this hook is intentionally empty on ARM26.
 */
void setup_mm_for_reboot(char mode)
{
}
142 | |||
143 | /* | ||
144 | * This contains the code to setup the memory map on an ARM2/ARM250/ARM3 | ||
145 | * o swapper_pg_dir = 0x0207d000 | ||
146 | * o kernel proper starts at 0x02080000 | ||
147 | * o create (allocate) a pte to contain the machine vectors | ||
148 | * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?) | ||
149 | * o populate the init tasks page directory (pgd) with the new pte | ||
150 | * o zero the rest of the init tasks pgdir (FIXME - what about vmalloc?!) | ||
151 | */ | ||
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	/* One pte table for the vector page, allocated from bootmem. */
	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	/* entry 0: the machine vectors page, mapped read-only */
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	/* Clear every other entry of the init task's page directory. */
	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
166 | |||
/* I/O mappings are fixed on ARM26; nothing to set up here. */
void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}
171 | |||
172 | /* | ||
173 | * We never have holes in the memmap | ||
174 | */ | ||
/*
 * We never have holes in the memmap, so this is intentionally empty.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}
178 | |||
/* Slab constructor: a fresh pte table starts with every entry clear. */
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}
183 | |||
/*
 * Slab constructor for pgd objects: clear only the user portion of the
 * pgd (the kernel portion is filled by get_pgd_slow's memcpy).  The
 * MEMC table preceding the pgd in the allocation is left untouched.
 */
static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
188 | |||
/*
 * Create the slab caches used for pte tables and for the combined
 * MEMC-table + pgd objects.  Both caches zero their objects via the
 * constructors above.  Failure to create either cache is fatal.
 */
void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				sizeof(pte_t) * PTRS_PER_PTE,
				0, 0, pte_cache_ctor, NULL);
	if (!pte_cache)
		BUG();

	/* pgd objects carry the MEMC table in front of the pgd proper. */
	pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
				sizeof(pgd_t) * PTRS_PER_PGD,
				0, 0, pgd_cache_ctor, NULL);
	if (!pgd_cache)
		BUG();
}
diff --git a/arch/arm26/mm/proc-funcs.S b/arch/arm26/mm/proc-funcs.S new file mode 100644 index 000000000000..c3d4cd3f457e --- /dev/null +++ b/arch/arm26/mm/proc-funcs.S | |||
@@ -0,0 +1,359 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/proc-arm2,3.S | ||
3 | * | ||
4 | * Copyright (C) 1997-1999 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * MMU functions for ARM2,3 | ||
11 | * | ||
12 | * These are the low level assembler for performing cache | ||
13 | * and memory functions on ARM2, ARM250 and ARM3 processors. | ||
14 | */ | ||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/assembler.h> | ||
17 | #include <asm/asm_offsets.h> | ||
18 | #include <asm/procinfo.h> | ||
19 | #include <asm/ptrace.h> | ||
20 | |||
/*
 * MEMC workhorse code.  It's both a horse which thinks it's a pig.
 */
/*
 * Function: cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long addr)
 * Params : pgd       Page tables/MEMC mapping
 *        : phys_pte  physical address, or PTE
 *        : addr      virtual address
 * Purpose : build one MEMC CAM entry for (addr -> phys_pte), store it
 *           in the soft copy kept below the pgd, and program it into
 *           the MEMC.
 */
ENTRY(cpu_memc_update_entry)
		tst	r1, #PAGE_PRESENT		@ is the page present
		orreq	r1, r1, #PAGE_OLD | PAGE_CLEAN	@ absent: force old+clean
		moveq	r2, #0x01f00000			@ and park it at the null address
		mov	r3, r1, lsr #13			@ convert to physical page nr
		and	r3, r3, #0x3fc
		adr	ip, memc_phys_table_32
		ldr	r3, [ip, r3]			@ phys page nr -> MEMC page bits
		tst	r1, #PAGE_OLD | PAGE_NOT_USER	@ old or kernel-only?
		biceq	r3, r3, #0x200			@ no: grant user read
		tsteq	r1, #PAGE_READONLY | PAGE_CLEAN	@ writable and dirty?
		biceq	r3, r3, #0x300			@ yes: grant user write too
		mov	r2, r2, lsr #15			@ virtual -> nr
		orr	r3, r3, r2, lsl #15		@ merge logical page number
		and	r2, r2, #0x300
		orr	r3, r3, r2, lsl #2		@ high logical bits go elsewhere
		and	r2, r3, #255			@ index of this entry's soft slot
		sub	r0, r0, #256 * 4		@ start of MEMC tables (below pgd)
		str	r3, [r0, r2, lsl #2]		@ update soft copy
		strb	r3, [r3]			@ program MEMC: byte store to the
						@ entry value itself loads the CAM
		movs	pc, lr				@ return (26-bit: restores PSR)
/*
 * Convert one present pte into a MEMC entry and store it in the soft
 * MEMC table (the hardware itself is reloaded later by *_set_pgd).
 *
 * Params : r0 = preserved
 *        : r1 = memc table base (preserved)
 *        : r2 = page table entry
 *        : r3 = preserved
 *        : r4 = unused
 *        : r5 = memc physical address translation table
 *        : ip = virtual address (preserved)
 */
update_pte:
		mov	r4, r2, lsr #13
		and	r4, r4, #0x3fc
		ldr	r4, [r5, r4]			@ convert to MEMC page

		tst	r2, #PAGE_OLD | PAGE_NOT_USER	@ check for MEMC read
		biceq	r4, r4, #0x200
		tsteq	r2, #PAGE_READONLY | PAGE_CLEAN	@ check for MEMC write
		biceq	r4, r4, #0x300

		orr	r4, r4, ip			@ merge virtual address
		and	r2, ip, #0x01800000		@ top two logical page bits...
		orr	r4, r4, r2, lsr #13		@ ...sit lower in the entry

		and	r2, r4, #255			@ soft table slot for this entry
		str	r4, [r1, r2, lsl #2]
		movs	pc, lr
77 | |||
/*
 * Walk one level-2 page table, feeding every present entry to
 * update_pte.  The loop body is unrolled four times; each pte covers
 * one 32K page, and the `tst ip, #32768 * 31` exit test fires once ip
 * reaches the next 1MB boundary (all 32 entries done).
 *
 * Params : r0 = preserved
 *        : r1 = memc table base (preserved)
 *        : r2 = page table base
 *        : r3 = preserved
 *        : r4 = unused
 *        : r5 = memc physical address translation table
 *        : ip = virtual address (updated)
 */
update_pte_table:
		stmfd	sp!, {r0, lr}
		bic	r0, r2, #3			@ strip low flag bits from table addr
1:		ldr	r2, [r0], #4			@ get entry
		tst	r2, #PAGE_PRESENT		@ page present
		blne	update_pte			@ process pte
		add	ip, ip, #32768			@ increment virt addr
		ldr	r2, [r0], #4			@ get entry
		tst	r2, #PAGE_PRESENT		@ page present
		blne	update_pte			@ process pte
		add	ip, ip, #32768			@ increment virt addr
		ldr	r2, [r0], #4			@ get entry
		tst	r2, #PAGE_PRESENT		@ page present
		blne	update_pte			@ process pte
		add	ip, ip, #32768			@ increment virt addr
		ldr	r2, [r0], #4			@ get entry
		tst	r2, #PAGE_PRESENT		@ page present
		blne	update_pte			@ process pte
		add	ip, ip, #32768			@ increment virt addr
		tst	ip, #32768 * 31			@ finished this 1MB chunk?
		bne	1b
		ldmfd	sp!, {r0, pc}^
109 | |||
/*
 * Function: cpu_memc_update_all(pgd_t *pgd)
 * Params : pgd  Page tables/MEMC mapping
 * Purpose : rebuild the whole soft MEMC table from the page tables:
 *           first reset every entry to the null mapping
 *           (clear_tables), then walk all 32 pgd slots (32 x 1MB of
 *           address space), two at a time.
 * Notes : this is optimised for 32k pages
 */
ENTRY(cpu_memc_update_all)
		stmfd	sp!, {r4, r5, lr}
		bl	clear_tables
		sub	r1, r0, #256 * 4		@ start of MEMC tables
		adr	r5, memc_phys_table_32		@ Convert to logical page number
		mov	ip, #0				@ virtual address
1:		ldmia	r0!, {r2, r3}			@ load two pgd entries
		tst	r2, #PAGE_PRESENT		@ is pgd entry present?
		addeq	ip, ip, #1048576		@ absent: skip 1MB
						@ FIXME - PAGE_PRESENT is for PTEs technically...
		blne	update_pte_table
		mov	r2, r3				@ now the second entry of the pair
		tst	r2, #PAGE_PRESENT		@ is pgd entry present?
		addeq	ip, ip, #1048576
		blne	update_pte_table
		teq	ip, #32 * 1048576		@ covered all 32MB?
		bne	1b
		ldmfd	sp!, {r4, r5, pc}^
132 | |||
/*
 * Build the table to map from physical page number to memc page number.
 *
 * The eight nested .irp loops enumerate all 256 byte values, one .long
 * per combination: each source bit b0..b7 contributes its value at the
 * scattered position the MEMC expects, on top of the constant base
 * 0x03800300.  The lookup code indexes this table with
 * (page_nr & 0xff) << 2.
 */
		.type	memc_phys_table_32, #object
memc_phys_table_32:
		.irp	b7, 0x00, 0x80
		.irp	b6, 0x00, 0x02
		.irp	b5, 0x00, 0x04
		.irp	b4, 0x00, 0x01

		.irp	b3, 0x00, 0x40
		.irp	b2, 0x00, 0x20
		.irp	b1, 0x00, 0x10
		.irp	b0, 0x00, 0x08
		.long	0x03800300 + \b7 + \b6 + \b5 + \b4 + \b3 + \b2 + \b1 + \b0
		.endr
		.endr
		.endr
		.endr

		.endr
		.endr
		.endr
		.endr
		.size	memc_phys_table_32, . - memc_phys_table_32
158 | |||
/*
 * helper for cpu_memc_update_all, this clears out all
 * mappings, setting them close to the top of memory,
 * and inaccessible (0x01f00000).
 * Params : r0 = page table pointer
 * Note : the word at _arm3_set_pgd - 4 holds the address of page_nr
 *        (declared in mm-memc.c), the number of entries to write.
 */
clear_tables:	ldr	r1, _arm3_set_pgd - 4		@ r1 = &page_nr
		ldr	r2, [r1]			@ r2 = page_nr
		sub	r1, r0, #256 * 4		@ start of MEMC tables
		add	r2, r1, r2, lsl #2		@ end of tables
		mov	r3, #0x03f00000			@ Default mapping (null mapping)
		orr	r3, r3, #0x00000f00
		orr	r4, r3, #1			@ four consecutive entry values
		orr	r5, r3, #2
		orr	ip, r3, #3
1:		stmia	r1!, {r3, r4, r5, ip}		@ write 8 entries per pass
		add	r3, r3, #4
		add	r4, r4, #4
		add	r5, r5, #4
		add	ip, ip, #4
		stmia	r1!, {r3, r4, r5, ip}
		add	r3, r3, #4
		add	r4, r4, #4
		add	r5, r5, #4
		add	ip, ip, #4
		teq	r1, r2				@ reached end of tables?
		bne	1b
		mov	pc, lr
187 | |||
/*
 * Function: *_set_pgd(pgd_t *pgd)
 * Params : pgd  New page tables/MEMC mapping
 * Purpose : update MEMC hardware with new mapping
 * Note : the ARM3 entry point flushes the cache first, then falls
 *        through into the common ARM2 code.  Each soft-table entry is
 *        loaded into the MEMC by a byte store to the entry value
 *        itself (the address encodes the mapping data).
 */
		.word	page_nr			@ extern - declared in mm-memc.c
_arm3_set_pgd:	mcr	p15, 0, r1, c1, c0, 0	@ flush cache
_arm2_set_pgd:	stmfd	sp!, {lr}
		ldr	r1, _arm3_set_pgd - 4	@ r1 = &page_nr
		ldr	r2, [r1]		@ r2 = page_nr
		sub	r0, r0, #256 * 4	@ start of MEMC tables
		add	r1, r0, r2, lsl #2	@ end of tables
1:		ldmia	r0!, {r2, r3, ip, lr}	@ fetch four soft entries...
		strb	r2, [r2]		@ ...and poke each into the MEMC
		strb	r3, [r3]
		strb	ip, [ip]
		strb	lr, [lr]
		ldmia	r0!, {r2, r3, ip, lr}	@ and four more
		strb	r2, [r2]
		strb	r3, [r3]
		strb	ip, [ip]
		strb	lr, [lr]
		teq	r0, r1			@ done all page_nr entries?
		bne	1b
		ldmfd	sp!, {pc}^
213 | |||
/*
 * Function: *_proc_init (void)
 * Purpose : Initialise the cache control registers
 * Note : nothing to do for ARM2 (its entry is just a return); the
 *        ARM3 path sets up its cache registers and falls through.
 */
_arm3_proc_init:
		mov	r0, #0x001f0000
		orr	r0, r0, #0x0000ff00
		orr	r0, r0, #0x000000ff	@ r0 = 0x001fffff
		mcr	p15, 0, r0, c3, c0	@ ARM3 Cacheable
		mcr	p15, 0, r0, c4, c0	@ ARM3 Updateable
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0	@ ARM3 Disruptive
		mcr	p15, 0, r0, c1, c0	@ ARM3 Flush
		mov	r0, #3
		mcr	p15, 0, r0, c2, c0	@ ARM3 Control (cache on)
_arm2_proc_init:
		movs	pc, lr

/*
 * Function: *_proc_fin (void)
 * Purpose : Finalise processor (disable caches)
 * Note : the ARM2 return deliberately sets both interrupt-disable
 *        bits in the 26-bit PSR.
 */
_arm3_proc_fin:	mov	r0, #2
		mcr	p15, 0, r0, c2, c0	@ ARM3 Control
_arm2_proc_fin:	orrs	pc, lr, #PSR_I_BIT|PSR_F_BIT
239 | |||
/*
 * Function: *_xchg_1 (int new, volatile void *ptr)
 * Params : new  New value to store at...
 *        : ptr  pointer to byte-wide location
 * Purpose : Performs an exchange operation
 * Returns : Original byte data at 'ptr'
 * Note : the ARM2 variant has no swp; it masks IRQs around a
 *        load/store pair instead, by writing PSR_I_BIT back into the
 *        26-bit PSR with teqp.  The movs return restores the caller's
 *        PSR from lr.
 */
_arm2_xchg_1:	mov	r2, pc
		orr	r2, r2, #PSR_I_BIT	@ disable IRQs...
		teqp	r2, #0			@ ...by rewriting the PSR
		ldrb	r2, [r1]		@ old value
		strb	r0, [r1]		@ store new value
		mov	r0, r2			@ return old value
		movs	pc, lr			@ return, restoring PSR

_arm3_xchg_1:	swpb	r0, r0, [r1]		@ atomic byte swap
		movs	pc, lr

/*
 * Function: *_xchg_4 (int new, volatile void *ptr)
 * Params : new  New value to store at...
 *        : ptr  pointer to word-wide location
 * Purpose : Performs an exchange operation
 * Returns : Original word data at 'ptr'
 * Note : word-wide twin of *_xchg_1 above; same IRQ-masking trick on
 *        ARM2, swp on ARM3.
 */
_arm2_xchg_4:	mov	r2, pc
		orr	r2, r2, #PSR_I_BIT
		teqp	r2, #0
		ldr	r2, [r1]
		str	r0, [r1]
		mov	r0, r2
		movs	pc, lr

_arm3_xchg_4:	swp	r0, r0, [r1]		@ atomic word swap
		movs	pc, lr
275 | |||
/*
 * check_bugs hook shared by ARM2/ARM250/ARM3: nothing to check, just
 * return with the FIQ-disable bit cleared in the 26-bit PSR.
 */
_arm2_3_check_bugs:
		bics	pc, lr, #PSR_F_BIT	@ Clear FIQ disable bit

armvlsi_name:	.asciz	"ARM/VLSI"
_arm2_name:	.asciz	"ARM 2"
_arm250_name:	.asciz	"ARM 250"
_arm3_name:	.asciz	"ARM 3"

		.section ".init.text", #alloc, #execinstr
/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these.
 * Slot order (must match everywhere): check_bugs, proc_init,
 * proc_fin, set_pgd, xchg_1, xchg_4.
 */
		.globl	arm2_processor_functions
arm2_processor_functions:
		.word	_arm2_3_check_bugs
		.word	_arm2_proc_init
		.word	_arm2_proc_fin
		.word	_arm2_set_pgd
		.word	_arm2_xchg_1
		.word	_arm2_xchg_4

cpu_arm2_info:
		.long	armvlsi_name
		.long	_arm2_name

/* ARM250: no cache/swp-less core init, but it does have swp. */
		.globl	arm250_processor_functions
arm250_processor_functions:
		.word	_arm2_3_check_bugs
		.word	_arm2_proc_init
		.word	_arm2_proc_fin
		.word	_arm2_set_pgd
		.word	_arm3_xchg_1
		.word	_arm3_xchg_4

cpu_arm250_info:
		.long	armvlsi_name
		.long	_arm250_name

		.globl	arm3_processor_functions
arm3_processor_functions:
		.word	_arm2_3_check_bugs
		.word	_arm3_proc_init
		.word	_arm3_proc_fin
		.word	_arm3_set_pgd
		.word	_arm3_xchg_1
		.word	_arm3_xchg_4

cpu_arm3_info:
		.long	armvlsi_name
		.long	_arm3_name

arm2_arch_name:	.asciz	"armv1"
arm3_arch_name:	.asciz	"armv2"
arm2_elf_name:	.asciz	"v1"
arm3_elf_name:	.asciz	"v2"
		.align

/*
 * CPU match records: id value, id mask, arch name, ELF name,
 * (unused), cpu info, processor function table.
 */
		.section ".proc.info", #alloc, #execinstr

		.long	0x41560200		@ ARM2 id/mask
		.long	0xfffffff0
		.long	arm2_arch_name
		.long	arm2_elf_name
		.long	0
		.long	cpu_arm2_info
		.long	arm2_processor_functions

		.long	0x41560250		@ ARM250 id/mask
		.long	0xfffffff0
		.long	arm3_arch_name
		.long	arm3_elf_name
		.long	0
		.long	cpu_arm250_info
		.long	arm250_processor_functions

		.long	0x41560300		@ ARM3 id/mask
		.long	0xfffffff0
		.long	arm3_arch_name
		.long	arm3_elf_name
		.long	0
		.long	cpu_arm3_info
		.long	arm3_processor_functions
359 | |||
diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c new file mode 100644 index 000000000000..77be86cca789 --- /dev/null +++ b/arch/arm26/mm/small_page.c | |||
@@ -0,0 +1,194 @@ | |||
1 | /* | ||
2 | * linux/arch/arm26/mm/small_page.c | ||
3 | * | ||
4 | * Copyright (C) 1996 Russell King | ||
5 | * Copyright (C) 2003 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Changelog: | ||
12 | * 26/01/1996 RMK Cleaned up various areas to make little more generic | ||
13 | * 07/02/1999 RMK Support added for 16K and 32K page sizes | ||
14 | * containing 8K blocks | ||
15 | * 23/05/2004 IM Fixed to use struct page->lru (thanks wli) | ||
16 | * | ||
17 | */ | ||
18 | #include <linux/signal.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/swap.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/bitops.h> | ||
30 | |||
31 | #include <asm/pgtable.h> | ||
32 | |||
/* Enable extra sanity checks on the per-page used-block bitmap. */
#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  A whole page is taken with alloc_page() and marked Reserved so the
 *  rest of the mm ignores it.  A bitmap kept in page->index (see
 *  USED_MAP below) records which fixed-size blocks of the page are in
 *  use; pages that still have free blocks are queued on the per-order
 *  list through page->lru.
 */

struct order {
	struct list_head queue;		/* pages with at least one free block */
	unsigned int mask;		/* (1 << shift) - 1 */
	unsigned int shift;		/* block size == 1 << shift */
	unsigned int block_mask;	/* nr_blocks - 1 */
	unsigned int all_used;		/* (1 << nr_blocks) - 1 */
};

/* Geometries for a 32K page: 2K blocks (16/page) and 8K blocks (4/page). */
static struct order orders[] = {
#if PAGE_SIZE == 32768
	{ LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
	{ LIST_HEAD_INIT(orders[1].queue), 8191, 13, 3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};

/* The in-use bitmap lives in the container page's ->index field. */
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

/* Protects every order's queue and every container page's USED_MAP. */
static DEFINE_SPINLOCK(small_page_lock);
79 | |||
80 | static unsigned long __get_small_page(int priority, struct order *order) | ||
81 | { | ||
82 | unsigned long flags; | ||
83 | struct page *page; | ||
84 | int offset; | ||
85 | |||
86 | do { | ||
87 | spin_lock_irqsave(&small_page_lock, flags); | ||
88 | |||
89 | if (list_empty(&order->queue)) | ||
90 | goto need_new_page; | ||
91 | |||
92 | page = list_entry(order->queue.next, struct page, lru); | ||
93 | again: | ||
94 | #ifdef PEDANTIC | ||
95 | if (USED_MAP(page) & ~order->all_used) | ||
96 | PAGE_BUG(page); | ||
97 | #endif | ||
98 | offset = ffz(USED_MAP(page)); | ||
99 | SET_USED(page, offset); | ||
100 | if (USED_MAP(page) == order->all_used) | ||
101 | list_del_init(&page->lru); | ||
102 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
103 | |||
104 | return (unsigned long) page_address(page) + (offset << order->shift); | ||
105 | |||
106 | need_new_page: | ||
107 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
108 | page = alloc_page(priority); | ||
109 | spin_lock_irqsave(&small_page_lock, flags); | ||
110 | |||
111 | if (list_empty(&order->queue)) { | ||
112 | if (!page) | ||
113 | goto no_page; | ||
114 | SetPageReserved(page); | ||
115 | USED_MAP(page) = 0; | ||
116 | list_add(&page->lru, &order->queue); | ||
117 | goto again; | ||
118 | } | ||
119 | |||
120 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
121 | __free_page(page); | ||
122 | } while (1); | ||
123 | |||
124 | no_page: | ||
125 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void __free_small_page(unsigned long spage, struct order *order) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | struct page *page; | ||
133 | |||
134 | if (virt_addr_valid(spage)) { | ||
135 | page = virt_to_page(spage); | ||
136 | |||
137 | /* | ||
138 | * The container-page must be marked Reserved | ||
139 | */ | ||
140 | if (!PageReserved(page) || spage & order->mask) | ||
141 | goto non_small; | ||
142 | |||
143 | #ifdef PEDANTIC | ||
144 | if (USED_MAP(page) & ~order->all_used) | ||
145 | PAGE_BUG(page); | ||
146 | #endif | ||
147 | |||
148 | spage = spage >> order->shift; | ||
149 | spage &= order->block_mask; | ||
150 | |||
151 | /* | ||
152 | * the following must be atomic wrt get_page | ||
153 | */ | ||
154 | spin_lock_irqsave(&small_page_lock, flags); | ||
155 | |||
156 | if (USED_MAP(page) == order->all_used) | ||
157 | list_add(&page->lru, &order->queue); | ||
158 | |||
159 | if (!TEST_AND_CLEAR_USED(page, spage)) | ||
160 | goto already_free; | ||
161 | |||
162 | if (USED_MAP(page) == 0) | ||
163 | goto free_page; | ||
164 | |||
165 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
166 | } | ||
167 | return; | ||
168 | |||
169 | free_page: | ||
170 | /* | ||
171 | * unlink the page from the small page queue and free it | ||
172 | */ | ||
173 | list_del_init(&page->lru); | ||
174 | spin_unlock_irqrestore(&small_page_lock, flags); | ||
175 | ClearPageReserved(page); | ||
176 | __free_page(page); | ||
177 | return; | ||
178 | |||
179 | non_small: | ||
180 | printk("Trying to free non-small page from %p\n", __builtin_return_address(0)); | ||
181 | return; | ||
182 | already_free: | ||
183 | printk("Trying to free free small page from %p\n", __builtin_return_address(0)); | ||
184 | } | ||
185 | |||
186 | unsigned long get_page_8k(int priority) | ||
187 | { | ||
188 | return __get_small_page(priority, orders+1); | ||
189 | } | ||
190 | |||
191 | void free_page_8k(unsigned long spage) | ||
192 | { | ||
193 | __free_small_page(spage, orders+1); | ||
194 | } | ||