aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm26/mm
diff options
context:
space:
mode:
authorAdrian Bunk <bunk@stusta.de>2007-07-31 03:38:19 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-07-31 18:39:39 -0400
commit99eb8a550dbccc0e1f6c7e866fe421810e0585f6 (patch)
tree130c6e3338a0655ba74355eba83afab9261e1ed0 /arch/arm26/mm
parent0d0ed42e5ca2e22465c591341839c18025748fe8 (diff)
Remove the arm26 port
The arm26 port has been in a state where it was far from even compiling for quite some time. Ian Molton agreed with the removal. Signed-off-by: Adrian Bunk <bunk@stusta.de> Cc: Ian Molton <spyro@f2s.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm26/mm')
-rw-r--r--arch/arm26/mm/Makefile6
-rw-r--r--arch/arm26/mm/extable.c24
-rw-r--r--arch/arm26/mm/fault.c312
-rw-r--r--arch/arm26/mm/fault.h5
-rw-r--r--arch/arm26/mm/init.c403
-rw-r--r--arch/arm26/mm/memc.c184
-rw-r--r--arch/arm26/mm/proc-funcs.S359
-rw-r--r--arch/arm26/mm/small_page.c192
8 files changed, 0 insertions, 1485 deletions
diff --git a/arch/arm26/mm/Makefile b/arch/arm26/mm/Makefile
deleted file mode 100644
index a8fb166d5c6d..000000000000
--- a/arch/arm26/mm/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1#
2# Makefile for the linux arm26-specific parts of the memory manager.
3#
4
5obj-y := init.o extable.o proc-funcs.o memc.o fault.o \
6 small_page.o
diff --git a/arch/arm26/mm/extable.c b/arch/arm26/mm/extable.c
deleted file mode 100644
index 38e1958d9538..000000000000
--- a/arch/arm26/mm/extable.c
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * linux/arch/arm26/mm/extable.c
3 */
4
5#include <linux/module.h>
6#include <asm/uaccess.h>
7
8int fixup_exception(struct pt_regs *regs)
9{
10 const struct exception_table_entry *fixup;
11
12 fixup = search_exception_tables(instruction_pointer(regs));
13
14 /*
15 * The kernel runs in SVC mode - make sure we keep running in SVC mode
16 * by frobbing the PSR appropriately (PSR and PC are in the same reg.
17 * on ARM26)
18 */
19 if (fixup)
20 regs->ARM_pc = fixup->fixup | PSR_I_BIT | MODE_SVC26;
21
22 return fixup != NULL;
23}
24
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
deleted file mode 100644
index dec638a0c8d9..000000000000
--- a/arch/arm26/mm/fault.c
+++ /dev/null
@@ -1,312 +0,0 @@
1/*
2 * linux/arch/arm26/mm/fault.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Modifications for ARM processor (c) 1995-2001 Russell King
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/signal.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/ptrace.h>
18#include <linux/mman.h>
19#include <linux/mm.h>
20#include <linux/interrupt.h>
21#include <linux/proc_fs.h>
22#include <linux/init.h>
23
24#include <asm/system.h>
25#include <asm/pgtable.h>
26#include <asm/uaccess.h> //FIXME this header may be bogusly included
27
28#include "fault.h"
29
30#define FAULT_CODE_LDRSTRPOST 0x80
31#define FAULT_CODE_LDRSTRPRE 0x40
32#define FAULT_CODE_LDRSTRREG 0x20
33#define FAULT_CODE_LDMSTM 0x10
34#define FAULT_CODE_LDCSTC 0x08
35#define FAULT_CODE_PREFETCH 0x04
36#define FAULT_CODE_WRITE 0x02
37#define FAULT_CODE_FORCECOW 0x01
38
39#define DO_COW(m) ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW))
40#define READ_FAULT(m) (!((m) & FAULT_CODE_WRITE))
41#define DEBUG
42/*
43 * This is useful to dump out the page tables associated with
44 * 'addr' in mm 'mm'.
45 */
46void show_pte(struct mm_struct *mm, unsigned long addr)
47{
48 pgd_t *pgd;
49
50 if (!mm)
51 mm = &init_mm;
52
53 printk(KERN_ALERT "pgd = %p\n", mm->pgd);
54 pgd = pgd_offset(mm, addr);
55 printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
56
57 do {
58 pmd_t *pmd;
59 pte_t *pte;
60
61 pmd = pmd_offset(pgd, addr);
62
63 if (pmd_none(*pmd))
64 break;
65
66 if (pmd_bad(*pmd)) {
67 printk("(bad)");
68 break;
69 }
70
71 /* We must not map this if we have highmem enabled */
72 /* FIXME */
73 pte = pte_offset_map(pmd, addr);
74 printk(", *pte=%08lx", pte_val(*pte));
75 pte_unmap(pte);
76 } while(0);
77
78 printk("\n");
79}
80
81/*
82 * Oops. The kernel tried to access some page that wasn't present.
83 */
84static void
85__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
86 struct pt_regs *regs)
87{
88 /*
89 * Are we prepared to handle this kernel fault?
90 */
91 if (fixup_exception(regs))
92 return;
93
94 /*
95 * No handler, we'll have to terminate things with extreme prejudice.
96 */
97 bust_spinlocks(1);
98 printk(KERN_ALERT
99 "Unable to handle kernel %s at virtual address %08lx\n",
100 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
101 "paging request", addr);
102
103 show_pte(mm, addr);
104 die("Oops", regs, fsr);
105 bust_spinlocks(0);
106 do_exit(SIGKILL);
107}
108
109/*
110 * Something tried to access memory that isn't in our memory map..
111 * User mode accesses just cause a SIGSEGV
112 */
113static void
114__do_user_fault(struct task_struct *tsk, unsigned long addr,
115 unsigned int fsr, int code, struct pt_regs *regs)
116{
117 struct siginfo si;
118
119#ifdef CONFIG_DEBUG_USER
120 printk("%s: unhandled page fault at 0x%08lx, code 0x%03x\n",
121 tsk->comm, addr, fsr);
122 show_pte(tsk->mm, addr);
123 show_regs(regs);
124 //dump_backtrace(regs, tsk); // FIXME ARM32 dropped this - why?
125 while(1); //FIXME - hack to stop debug going nutso
126#endif
127
128 tsk->thread.address = addr;
129 tsk->thread.error_code = fsr;
130 tsk->thread.trap_no = 14;
131 si.si_signo = SIGSEGV;
132 si.si_errno = 0;
133 si.si_code = code;
134 si.si_addr = (void *)addr;
135 force_sig_info(SIGSEGV, &si, tsk);
136}
137
138static int
139__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
140 struct task_struct *tsk)
141{
142 struct vm_area_struct *vma;
143 int fault, mask;
144
145 vma = find_vma(mm, addr);
146 fault = -2; /* bad map area */
147 if (!vma)
148 goto out;
149 if (vma->vm_start > addr)
150 goto check_stack;
151
152 /*
153 * Ok, we have a good vm_area for this
154 * memory access, so we can handle it.
155 */
156good_area:
157 if (READ_FAULT(fsr)) /* read? */
158 mask = VM_READ|VM_EXEC|VM_WRITE;
159 else
160 mask = VM_WRITE;
161
162 fault = -1; /* bad access type */
163 if (!(vma->vm_flags & mask))
164 goto out;
165
166 /*
167 * If for any reason at all we couldn't handle
168 * the fault, make sure we exit gracefully rather
169 * than endlessly redo the fault.
170 */
171survive:
172 fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
173 if (unlikely(fault & VM_FAULT_ERROR)) {
174 if (fault & VM_FAULT_OOM)
175 goto out_of_memory;
176 else if (fault & VM_FAULT_SIGBUS)
177 return fault;
178 BUG();
179 }
180 if (fault & VM_FAULT_MAJOR)
181 tsk->maj_flt++;
182 else
183 tsk->min_flt++;
184 return fault;
185
186out_of_memory:
187 fault = -3; /* out of memory */
188 if (!is_init(tsk))
189 goto out;
190
191 /*
192 * If we are out of memory for pid1,
193 * sleep for a while and retry
194 */
195 yield();
196 goto survive;
197
198check_stack:
199 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
200 goto good_area;
201out:
202 return fault;
203}
204
205int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
206{
207 struct task_struct *tsk;
208 struct mm_struct *mm;
209 int fault;
210
211 tsk = current;
212 mm = tsk->mm;
213
214 /*
215 * If we're in an interrupt or have no user
216 * context, we must not take the fault..
217 */
218 if (in_atomic() || !mm)
219 goto no_context;
220
221 down_read(&mm->mmap_sem);
222 fault = __do_page_fault(mm, addr, fsr, tsk);
223 up_read(&mm->mmap_sem);
224
225 /*
226 * Handle the "normal" case first
227 */
228 if (likely(!(fault & VM_FAULT_ERROR)))
229 return 0;
230 if (fault & VM_FAULT_SIGBUS)
231 goto do_sigbus;
232 /* else VM_FAULT_OOM */
233
234 /*
235 * If we are in kernel mode at this point, we
236 * have no context to handle this fault with.
237 * FIXME - is this test right?
238 */
239 if (!user_mode(regs)){
240 goto no_context;
241 }
242
243 if (fault == -3) {
244 /*
245 * We ran out of memory, or some other thing happened to
246 * us that made us unable to handle the page fault gracefully.
247 */
248 printk("VM: killing process %s\n", tsk->comm);
249 do_exit(SIGKILL);
250 }
251 else{
252 __do_user_fault(tsk, addr, fsr, fault == -1 ? SEGV_ACCERR : SEGV_MAPERR, regs);
253 }
254
255 return 0;
256
257
258/*
259 * We ran out of memory, or some other thing happened to us that made
260 * us unable to handle the page fault gracefully.
261 */
262do_sigbus:
263 /*
264 * Send a sigbus, regardless of whether we were in kernel
265 * or user mode.
266 */
267 tsk->thread.address = addr; //FIXME - need other bits setting?
268 tsk->thread.error_code = fsr;
269 tsk->thread.trap_no = 14;
270 force_sig(SIGBUS, tsk);
271#ifdef CONFIG_DEBUG_USER
272 printk(KERN_DEBUG "%s: sigbus at 0x%08lx, pc=0x%08lx\n",
273 current->comm, addr, instruction_pointer(regs));
274#endif
275
276 /* Kernel mode? Handle exceptions or die */
277 if (user_mode(regs))
278 return 0;
279
280no_context:
281 __do_kernel_fault(mm, addr, fsr, regs);
282 return 0;
283}
284
285/*
286 * Handle a data abort. Note that we have to handle a range of addresses
287 * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force
288 * a copy-on-write. However, on the second page, we always force COW.
289 */
290asmlinkage void
291do_DataAbort(unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
292{
293 do_page_fault(min_addr, mode, regs);
294
295 if ((min_addr ^ max_addr) >> PAGE_SHIFT){
296 do_page_fault(max_addr, mode | FAULT_CODE_FORCECOW, regs);
297 }
298}
299
300asmlinkage int
301do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
302{
303#if 0
304 if (the memc mapping for this page exists) {
305 printk ("Page in, but got abort (undefined instruction?)\n");
306 return 0;
307 }
308#endif
309 do_page_fault(addr, FAULT_CODE_PREFETCH, regs);
310 return 1;
311}
312
diff --git a/arch/arm26/mm/fault.h b/arch/arm26/mm/fault.h
deleted file mode 100644
index 4442d00d86ac..000000000000
--- a/arch/arm26/mm/fault.h
+++ /dev/null
@@ -1,5 +0,0 @@
1void show_pte(struct mm_struct *mm, unsigned long addr);
2
3int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4
5unsigned long search_extable(unsigned long addr); //FIXME - is it right?
diff --git a/arch/arm26/mm/init.c b/arch/arm26/mm/init.c
deleted file mode 100644
index 36e7ee3f8321..000000000000
--- a/arch/arm26/mm/init.c
+++ /dev/null
@@ -1,403 +0,0 @@
1/*
2 * linux/arch/arm26/mm/init.c
3 *
4 * Copyright (C) 1995-2002 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/signal.h>
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/string.h>
15#include <linux/types.h>
16#include <linux/ptrace.h>
17#include <linux/mman.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/smp.h>
21#include <linux/init.h>
22#include <linux/initrd.h>
23#include <linux/bootmem.h>
24#include <linux/blkdev.h>
25#include <linux/pfn.h>
26
27#include <asm/segment.h>
28#include <asm/mach-types.h>
29#include <asm/dma.h>
30#include <asm/hardware.h>
31#include <asm/setup.h>
32#include <asm/tlb.h>
33
34#include <asm/map.h>
35
36struct mmu_gather mmu_gathers[NR_CPUS];
37
38extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
39extern char _stext, _text, _etext, _end, __init_begin, __init_end;
40#ifdef CONFIG_XIP_KERNEL
41extern char _endtext, _sdata;
42#endif
43extern unsigned long phys_initrd_start;
44extern unsigned long phys_initrd_size;
45
46/*
47 * The sole use of this is to pass memory configuration
48 * data from paging_init to mem_init.
49 */
50static struct meminfo meminfo __initdata = { 0, };
51
52/*
53 * empty_zero_page is a special page that is used for
54 * zero-initialized data and COW.
55 */
56struct page *empty_zero_page;
57
58void show_mem(void)
59{
60 int free = 0, total = 0, reserved = 0;
61 int shared = 0, cached = 0, slab = 0;
62 struct page *page, *end;
63
64 printk("Mem-info:\n");
65 show_free_areas();
66 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
67
68
69 page = NODE_MEM_MAP(0);
70 end = page + NODE_DATA(0)->node_spanned_pages;
71
72 do {
73 total++;
74 if (PageReserved(page))
75 reserved++;
76 else if (PageSwapCache(page))
77 cached++;
78 else if (PageSlab(page))
79 slab++;
80 else if (!page_count(page))
81 free++;
82 else
83 shared += page_count(page) - 1;
84 page++;
85 } while (page < end);
86
87 printk("%d pages of RAM\n", total);
88 printk("%d free pages\n", free);
89 printk("%d reserved pages\n", reserved);
90 printk("%d slab pages\n", slab);
91 printk("%d pages shared\n", shared);
92 printk("%d pages swap cached\n", cached);
93}
94
95struct node_info {
96 unsigned int start;
97 unsigned int end;
98 int bootmap_pages;
99};
100
101/*
102 * FIXME: We really want to avoid allocating the bootmap bitmap
103 * over the top of the initrd. Hopefully, this is located towards
104 * the start of a bank, so if we allocate the bootmap bitmap at
105 * the end, we won't clash.
106 */
107static unsigned int __init
108find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages)
109{
110 unsigned int start_pfn, bootmap_pfn;
111 unsigned int start, end;
112
113 start_pfn = PFN_UP((unsigned long)&_end);
114 bootmap_pfn = 0;
115
116 /* ARM26 machines only have one node */
117 if (mi->bank->node != 0)
118 BUG();
119
120 start = PFN_UP(mi->bank->start);
121 end = PFN_DOWN(mi->bank->size + mi->bank->start);
122
123 if (start < start_pfn)
124 start = start_pfn;
125
126 if (end <= start)
127 BUG();
128
129 if (end - start >= bootmap_pages)
130 bootmap_pfn = start;
131 else
132 BUG();
133
134 return bootmap_pfn;
135}
136
137/*
138 * Scan the memory info structure and pull out:
139 * - the end of memory
140 * - the number of nodes
141 * - the pfn range of each node
142 * - the number of bootmem bitmap pages
143 */
144static void __init
145find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
146{
147 unsigned int memend_pfn = 0;
148
149 nodes_clear(node_online_map);
150 node_set_online(0);
151
152 np->bootmap_pages = 0;
153
154 if (mi->bank->size == 0) {
155 BUG();
156 }
157
158 /*
159 * Get the start and end pfns for this bank
160 */
161 np->start = PFN_UP(mi->bank->start);
162 np->end = PFN_DOWN(mi->bank->start + mi->bank->size);
163
164 if (memend_pfn < np->end)
165 memend_pfn = np->end;
166
167 /*
168 * Calculate the number of pages we require to
169 * store the bootmem bitmaps.
170 */
171 np->bootmap_pages = bootmem_bootmap_pages(np->end - np->start);
172
173 /*
174 * This doesn't seem to be used by the Linux memory
175 * manager any more. If we can get rid of it, we
176 * also get rid of some of the stuff above as well.
177 */
178 max_low_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET);
179 max_pfn = memend_pfn - PFN_DOWN(PHYS_OFFSET);
180 mi->end = memend_pfn << PAGE_SHIFT;
181
182}
183
184/*
185 * Initialise the bootmem allocator for all nodes. This is called
186 * early during the architecture specific initialisation.
187 */
188void __init bootmem_init(struct meminfo *mi)
189{
190 struct node_info node_info;
191 unsigned int bootmap_pfn;
192 pg_data_t *pgdat = NODE_DATA(0);
193
194 find_memend_and_nodes(mi, &node_info);
195
196 bootmap_pfn = find_bootmap_pfn(mi, node_info.bootmap_pages);
197
198 /*
199 * Note that node 0 must always have some pages.
200 */
201 if (node_info.end == 0)
202 BUG();
203
204 /*
205 * Initialise the bootmem allocator.
206 */
207 init_bootmem_node(pgdat, bootmap_pfn, node_info.start, node_info.end);
208
209 /*
210 * Register all available RAM in this node with the bootmem allocator.
211 */
212 free_bootmem_node(pgdat, mi->bank->start, mi->bank->size);
213
214 /*
215 * Register the kernel text and data with bootmem.
216 * Note: with XIP we dont register .text since
217 * its in ROM.
218 */
219#ifdef CONFIG_XIP_KERNEL
220 reserve_bootmem_node(pgdat, __pa(&_sdata), &_end - &_sdata);
221#else
222 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
223#endif
224
225 /*
226 * And don't forget to reserve the allocator bitmap,
227 * which will be freed later.
228 */
229 reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
230 node_info.bootmap_pages << PAGE_SHIFT);
231
232 /*
233 * These should likewise go elsewhere. They pre-reserve
234 * the screen memory region at the start of main system
235 * memory. FIXME - screen RAM is not 512K!
236 */
237 reserve_bootmem_node(pgdat, 0x02000000, 0x00080000);
238
239#ifdef CONFIG_BLK_DEV_INITRD
240 initrd_start = phys_initrd_start;
241 initrd_end = initrd_start + phys_initrd_size;
242
243 /* Achimedes machines only have one node, so initrd is in node 0 */
244#ifdef CONFIG_XIP_KERNEL
245 /* Only reserve initrd space if it is in RAM */
246 if(initrd_start && initrd_start < 0x03000000){
247#else
248 if(initrd_start){
249#endif
250 reserve_bootmem_node(pgdat, __pa(initrd_start),
251 initrd_end - initrd_start);
252 }
253#endif /* CONFIG_BLK_DEV_INITRD */
254
255
256}
257
258/*
259 * paging_init() sets up the page tables, initialises the zone memory
260 * maps, and sets up the zero page, bad page and bad page tables.
261 */
262void __init paging_init(struct meminfo *mi)
263{
264 void *zero_page;
265 unsigned long zone_size[MAX_NR_ZONES];
266 unsigned long zhole_size[MAX_NR_ZONES];
267 struct bootmem_data *bdata;
268 pg_data_t *pgdat;
269 int i;
270
271 memcpy(&meminfo, mi, sizeof(meminfo));
272
273 /*
274 * allocate the zero page. Note that we count on this going ok.
275 */
276 zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
277
278 /*
279 * initialise the page tables.
280 */
281 memtable_init(mi);
282 flush_tlb_all();
283
284 /*
285 * initialise the zones in node 0 (archimedes have only 1 node)
286 */
287
288 for (i = 0; i < MAX_NR_ZONES; i++) {
289 zone_size[i] = 0;
290 zhole_size[i] = 0;
291 }
292
293 pgdat = NODE_DATA(0);
294 bdata = pgdat->bdata;
295 zone_size[0] = bdata->node_low_pfn -
296 (bdata->node_boot_start >> PAGE_SHIFT);
297 if (!zone_size[0])
298 BUG();
299 pgdat->node_mem_map = NULL;
300 free_area_init_node(0, pgdat, zone_size,
301 bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
302
303 /*
304 * finish off the bad pages once
305 * the mem_map is initialised
306 */
307 memzero(zero_page, PAGE_SIZE);
308 empty_zero_page = virt_to_page(zero_page);
309}
310
311static inline void free_area(unsigned long addr, unsigned long end, char *s)
312{
313 unsigned int size = (end - addr) >> 10;
314
315 for (; addr < end; addr += PAGE_SIZE) {
316 struct page *page = virt_to_page(addr);
317 ClearPageReserved(page);
318 init_page_count(page);
319 free_page(addr);
320 totalram_pages++;
321 }
322
323 if (size && s)
324 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
325}
326
327/*
328 * mem_init() marks the free areas in the mem_map and tells us how much
329 * memory is free. This is done after various parts of the system have
330 * claimed their memory after the kernel image.
331 */
332void __init mem_init(void)
333{
334 unsigned int codepages, datapages, initpages;
335 pg_data_t *pgdat = NODE_DATA(0);
336 extern int sysctl_overcommit_memory;
337
338
339 /* Note: data pages includes BSS */
340#ifdef CONFIG_XIP_KERNEL
341 codepages = &_endtext - &_text;
342 datapages = &_end - &_sdata;
343#else
344 codepages = &_etext - &_text;
345 datapages = &_end - &_etext;
346#endif
347 initpages = &__init_end - &__init_begin;
348
349 high_memory = (void *)__va(meminfo.end);
350 max_mapnr = virt_to_page(high_memory) - mem_map;
351
352 /* this will put all unused low memory onto the freelists */
353 if (pgdat->node_spanned_pages != 0)
354 totalram_pages += free_all_bootmem_node(pgdat);
355
356 num_physpages = meminfo.bank[0].size >> PAGE_SHIFT;
357
358 printk(KERN_INFO "Memory: %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
359 printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
360 "%dK data, %dK init)\n",
361 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
362 codepages >> 10, datapages >> 10, initpages >> 10);
363
364 /*
365 * Turn on overcommit on tiny machines
366 */
367 if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
368 sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
369 printk("Turning on overcommit\n");
370 }
371}
372
373void free_initmem(void){
374#ifndef CONFIG_XIP_KERNEL
375 free_area((unsigned long)(&__init_begin),
376 (unsigned long)(&__init_end),
377 "init");
378#endif
379}
380
381#ifdef CONFIG_BLK_DEV_INITRD
382
383static int keep_initrd;
384
385void free_initrd_mem(unsigned long start, unsigned long end)
386{
387#ifdef CONFIG_XIP_KERNEL
388 /* Only bin initrd if it is in RAM... */
389 if(!keep_initrd && start < 0x03000000)
390#else
391 if (!keep_initrd)
392#endif
393 free_area(start, end, "initrd");
394}
395
396static int __init keepinitrd_setup(char *__unused)
397{
398 keep_initrd = 1;
399 return 1;
400}
401
402__setup("keepinitrd", keepinitrd_setup);
403#endif
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
deleted file mode 100644
index ffecd8578247..000000000000
--- a/arch/arm26/mm/memc.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * linux/arch/arm26/mm/memc.c
3 *
4 * Copyright (C) 1998-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Page table sludge for older ARM processor architectures.
11 */
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/bootmem.h>
16
17#include <asm/pgtable.h>
18#include <asm/pgalloc.h>
19#include <asm/page.h>
20#include <asm/memory.h>
21#include <asm/hardware.h>
22
23#include <asm/map.h>
24
25#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
26
27struct kmem_cache *pte_cache, *pgd_cache;
28int page_nr;
29
30/*
31 * Allocate space for a page table and a MEMC table.
32 * Note that we place the MEMC
33 * table before the page directory. This means we can
34 * easily get to both tightly-associated data structures
35 * with a single pointer.
36 */
37static inline pgd_t *alloc_pgd_table(void)
38{
39 void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
40
41 if (pg2k)
42 pg2k += MEMC_TABLE_SIZE;
43
44 return (pgd_t *)pg2k;
45}
46
47/*
48 * Free a page table. this function is the counterpart to get_pgd_slow
49 * below, not alloc_pgd_table above.
50 */
51void free_pgd_slow(pgd_t *pgd)
52{
53 unsigned long tbl = (unsigned long)pgd;
54
55 tbl -= MEMC_TABLE_SIZE;
56
57 kmem_cache_free(pgd_cache, (void *)tbl);
58}
59
60/*
61 * Allocate a new pgd and fill it in ready for use
62 *
63 * A new tasks pgd is completely empty (all pages !present) except for:
64 *
65 * o The machine vectors at virtual address 0x0
66 * o The vmalloc region at the top of address space
67 *
68 */
69#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
70
71pgd_t *get_pgd_slow(struct mm_struct *mm)
72{
73 pgd_t *new_pgd, *init_pgd;
74 pmd_t *new_pmd, *init_pmd;
75 pte_t *new_pte, *init_pte;
76
77 new_pgd = alloc_pgd_table();
78 if (!new_pgd)
79 goto no_pgd;
80
81 /*
82 * On ARM, first page must always be allocated since it contains
83 * the machine vectors.
84 */
85 new_pmd = pmd_alloc(mm, new_pgd, 0);
86 if (!new_pmd)
87 goto no_pmd;
88
89 new_pte = pte_alloc_map(mm, new_pmd, 0);
90 if (!new_pte)
91 goto no_pte;
92
93 init_pgd = pgd_offset(&init_mm, 0);
94 init_pmd = pmd_offset(init_pgd, 0);
95 init_pte = pte_offset(init_pmd, 0);
96
97 set_pte(new_pte, *init_pte);
98 pte_unmap(new_pte);
99
100 /*
101 * the page table entries are zeroed
102 * when the table is created. (see the cache_ctor functions below)
103 * Now we need to plonk the kernel (vmalloc) area at the end of
104 * the address space. We copy this from the init thread, just like
105 * the init_pte we copied above...
106 */
107 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
108 (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
109
110 /* update MEMC tables */
111 cpu_memc_update_all(new_pgd);
112 return new_pgd;
113
114no_pte:
115 pmd_free(new_pmd);
116no_pmd:
117 free_pgd_slow(new_pgd);
118no_pgd:
119 return NULL;
120}
121
122/*
123 * No special code is required here.
124 */
125void setup_mm_for_reboot(char mode)
126{
127}
128
129/*
130 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
131 * o swapper_pg_dir = 0x0207d000
132 * o kernel proper starts at 0x0208000
133 * o create (allocate) a pte to contain the machine vectors
134 * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
135 * o populate the init tasks page directory (pgd) with the new pte
136 * o zero the rest of the init tasks pgdir (FIXME - what about vmalloc?!)
137 */
138void __init memtable_init(struct meminfo *mi)
139{
140 pte_t *pte;
141 int i;
142
143 page_nr = max_low_pfn;
144
145 pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
146 pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
147 pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);
148
149 for (i = 1; i < PTRS_PER_PGD; i++)
150 pgd_val(swapper_pg_dir[i]) = 0;
151}
152
153void __init iotable_init(struct map_desc *io_desc)
154{
155 /* nothing to do */
156}
157
158/*
159 * We never have holes in the memmap
160 */
161void __init create_memmap_holes(struct meminfo *mi)
162{
163}
164
165static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
166{
167 memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
168}
169
170static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
171{
172 memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
173}
174
175void __init pgtable_cache_init(void)
176{
177 pte_cache = kmem_cache_create("pte-cache",
178 sizeof(pte_t) * PTRS_PER_PTE,
179 0, SLAB_PANIC, pte_cache_ctor);
180
181 pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
182 sizeof(pgd_t) * PTRS_PER_PGD,
183 0, SLAB_PANIC, pgd_cache_ctor);
184}
diff --git a/arch/arm26/mm/proc-funcs.S b/arch/arm26/mm/proc-funcs.S
deleted file mode 100644
index f9fca524c57a..000000000000
--- a/arch/arm26/mm/proc-funcs.S
+++ /dev/null
@@ -1,359 +0,0 @@
1/*
2 * linux/arch/arm26/mm/proc-arm2,3.S
3 *
4 * Copyright (C) 1997-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * MMU functions for ARM2,3
11 *
12 * These are the low level assembler for performing cache
13 * and memory functions on ARM2, ARM250 and ARM3 processors.
14 */
15#include <linux/linkage.h>
16#include <asm/assembler.h>
17#include <asm/asm-offsets.h>
18#include <asm/procinfo.h>
19#include <asm/ptrace.h>
20
21/*
22 * MEMC workhorse code. It's both a horse which things it's a pig.
23 */
24/*
25 * Function: cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long addr)
26 * Params : pgd Page tables/MEMC mapping
27 * : phys_pte physical address, or PTE
28 * : addr virtual address
29 */
30ENTRY(cpu_memc_update_entry)
31 tst r1, #PAGE_PRESENT @ is the page present
32 orreq r1, r1, #PAGE_OLD | PAGE_CLEAN
33 moveq r2, #0x01f00000
34 mov r3, r1, lsr #13 @ convert to physical page nr
35 and r3, r3, #0x3fc
36 adr ip, memc_phys_table_32
37 ldr r3, [ip, r3]
38 tst r1, #PAGE_OLD | PAGE_NOT_USER
39 biceq r3, r3, #0x200
40 tsteq r1, #PAGE_READONLY | PAGE_CLEAN
41 biceq r3, r3, #0x300
42 mov r2, r2, lsr #15 @ virtual -> nr
43 orr r3, r3, r2, lsl #15
44 and r2, r2, #0x300
45 orr r3, r3, r2, lsl #2
46 and r2, r3, #255
47 sub r0, r0, #256 * 4
48 str r3, [r0, r2, lsl #2]
49 strb r3, [r3]
50 movs pc, lr
51/*
52 * Params : r0 = preserved
53 * : r1 = memc table base (preserved)
54 * : r2 = page table entry
55 * : r3 = preserved
56 * : r4 = unused
57 * : r5 = memc physical address translation table
58 * : ip = virtual address (preserved)
59 */
60update_pte:
61 mov r4, r2, lsr #13
62 and r4, r4, #0x3fc
63 ldr r4, [r5, r4] @ covert to MEMC page
64
65 tst r2, #PAGE_OLD | PAGE_NOT_USER @ check for MEMC read
66 biceq r4, r4, #0x200
67 tsteq r2, #PAGE_READONLY | PAGE_CLEAN @ check for MEMC write
68 biceq r4, r4, #0x300
69
70 orr r4, r4, ip
71 and r2, ip, #0x01800000
72 orr r4, r4, r2, lsr #13
73
74 and r2, r4, #255
75 str r4, [r1, r2, lsl #2]
76 movs pc, lr
77
78/*
79 * Params : r0 = preserved
80 * : r1 = memc table base (preserved)
81 * : r2 = page table base
82 * : r3 = preserved
83 * : r4 = unused
84 * : r5 = memc physical address translation table
85 * : ip = virtual address (updated)
86 */
87update_pte_table:
88 stmfd sp!, {r0, lr}
89 bic r0, r2, #3
901: ldr r2, [r0], #4 @ get entry
91 tst r2, #PAGE_PRESENT @ page present
92 blne update_pte @ process pte
93 add ip, ip, #32768 @ increment virt addr
94 ldr r2, [r0], #4 @ get entry
95 tst r2, #PAGE_PRESENT @ page present
96 blne update_pte @ process pte
97 add ip, ip, #32768 @ increment virt addr
98 ldr r2, [r0], #4 @ get entry
99 tst r2, #PAGE_PRESENT @ page present
100 blne update_pte @ process pte
101 add ip, ip, #32768 @ increment virt addr
102 ldr r2, [r0], #4 @ get entry
103 tst r2, #PAGE_PRESENT @ page present
104 blne update_pte @ process pte
105 add ip, ip, #32768 @ increment virt addr
106 tst ip, #32768 * 31 @ finished?
107 bne 1b
108 ldmfd sp!, {r0, pc}^
109
110/*
111 * Function: cpu_memc_update_all(pgd_t *pgd)
112 * Params : pgd Page tables/MEMC mapping
113 * Notes : this is optimised for 32k pages
114 */
115ENTRY(cpu_memc_update_all)
116 stmfd sp!, {r4, r5, lr}
117 bl clear_tables
118 sub r1, r0, #256 * 4 @ start of MEMC tables
119 adr r5, memc_phys_table_32 @ Convert to logical page number
120 mov ip, #0 @ virtual address
1211: ldmia r0!, {r2, r3} @ load two pgd entries
122 tst r2, #PAGE_PRESENT @ is pgd entry present?
123 addeq ip, ip, #1048576 @FIXME - PAGE_PRESENT is for PTEs technically...
124 blne update_pte_table
125 mov r2, r3
126 tst r2, #PAGE_PRESENT @ is pgd entry present?
127 addeq ip, ip, #1048576
128 blne update_pte_table
129 teq ip, #32 * 1048576
130 bne 1b
131 ldmfd sp!, {r4, r5, pc}^
132
133/*
134 * Build the table to map from physical page number to memc page number
135 */
136 .type memc_phys_table_32, #object
137memc_phys_table_32:
138 .irp b7, 0x00, 0x80
139 .irp b6, 0x00, 0x02
140 .irp b5, 0x00, 0x04
141 .irp b4, 0x00, 0x01
142
143 .irp b3, 0x00, 0x40
144 .irp b2, 0x00, 0x20
145 .irp b1, 0x00, 0x10
146 .irp b0, 0x00, 0x08
147 .long 0x03800300 + \b7 + \b6 + \b5 + \b4 + \b3 + \b2 + \b1 + \b0
148 .endr
149 .endr
150 .endr
151 .endr
152
153 .endr
154 .endr
155 .endr
156 .endr
157 .size memc_phys_table_32, . - memc_phys_table_32
158
159/*
160 * helper for cpu_memc_update_all, this clears out all
161 * mappings, setting them close to the top of memory,
162 * and inaccessible (0x01f00000).
163 * Params : r0 = page table pointer
164 */
165clear_tables: ldr r1, _arm3_set_pgd - 4
166 ldr r2, [r1]
167 sub r1, r0, #256 * 4 @ start of MEMC tables
168 add r2, r1, r2, lsl #2 @ end of tables
169 mov r3, #0x03f00000 @ Default mapping (null mapping)
170 orr r3, r3, #0x00000f00
171 orr r4, r3, #1
172 orr r5, r3, #2
173 orr ip, r3, #3
1741: stmia r1!, {r3, r4, r5, ip}
175 add r3, r3, #4
176 add r4, r4, #4
177 add r5, r5, #4
178 add ip, ip, #4
179 stmia r1!, {r3, r4, r5, ip}
180 add r3, r3, #4
181 add r4, r4, #4
182 add r5, r5, #4
183 add ip, ip, #4
184 teq r1, r2
185 bne 1b
186 mov pc, lr
187
/*
 * Function: *_set_pgd(pgd_t *pgd)
 * Params  : pgd	New page tables/MEMC mapping
 * Purpose : update MEMC hardware with new mapping
 *
 * The tables hold pre-computed MEMC command words; storing any byte to
 * the address contained in an entry programs that mapping (hence the
 * strb rX, [rX] idiom below).
 */
		.word	page_nr			@ extern - declared in mm-memc.c;
						@ loaded below via _arm3_set_pgd - 4
_arm3_set_pgd:	mcr	p15, 0, r1, c1, c0, 0	@ ARM3 only: flush cache, then fall
						@ through to the common ARM2 code
_arm2_set_pgd:	stmfd	sp!, {lr}
		ldr	r1, _arm3_set_pgd - 4	@ r1 = address of page_nr
		ldr	r2, [r1]		@ r2 = page_nr entries to program
		sub	r0, r0, #256 * 4	@ start of MEMC tables
		add	r1, r0, r2, lsl #2	@ end of tables
1:		ldmia	r0!, {r2, r3, ip, lr}	@ fetch four entries, and write a
		strb	r2, [r2]		@ byte to each entry's own value
		strb	r3, [r3]		@ (the MEMC command address) to
		strb	ip, [ip]		@ install the mapping
		strb	lr, [lr]
		ldmia	r0!, {r2, r3, ip, lr}	@ unrolled: next four entries
		strb	r2, [r2]
		strb	r3, [r3]
		strb	ip, [ip]
		strb	lr, [lr]
		teq	r0, r1			@ end of tables reached?
		bne	1b
		ldmfd	sp!, {pc}^		@ return, restoring the saved PSR
213
/*
 * Function: *_proc_init (void)
 * Purpose : Initialise the cache control registers
 */
_arm3_proc_init:
		mov	r0, #0x001f0000
		orr	r0, r0, #0x0000ff00
		orr	r0, r0, #0x000000ff	@ r0 = 0x001fffff (bitmask;
						@ presumably one bit per memory
						@ region - confirm vs ARM3 docs)
		mcr	p15, 0, r0, c3, c0	@ ARM3 Cacheable
		mcr	p15, 0, r0, c4, c0	@ ARM3 Updateable
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0	@ ARM3 Disruptive
		mcr	p15, 0, r0, c1, c0	@ ARM3 Flush
		mov	r0, #3
		mcr	p15, 0, r0, c2, c0	@ ARM3 Control
_arm2_proc_init:				@ ARM2 is uncached: nothing to do
		movs	pc, lr			@ return, restoring caller's PSR
231
/*
 * Function: *_proc_fin (void)
 * Purpose : Finalise processor (disable caches)
 */
_arm3_proc_fin:	mov	r0, #2
		mcr	p15, 0, r0, c2, c0	@ ARM3 Control = 2 (NOTE(review):
						@ presumably cache off -- confirm)
_arm2_proc_fin:	orrs	pc, lr, #PSR_I_BIT|PSR_F_BIT	@ return with both IRQs
						@ and FIQs disabled
239
/*
 * Function: *_xchg_1 (int new, volatile void *ptr)
 * Params  : new	New value to store at...
 *	   : ptr	pointer to byte-wide location
 * Purpose : Performs an exchange operation
 * Returns : Original byte data at 'ptr'
 */
_arm2_xchg_1:	mov	r2, pc			@ ARM2 has no SWP: fake atomicity by
		orr	r2, r2, #PSR_I_BIT	@ disabling IRQs (the PSR shares the
		teqp	r2, #0			@ PC on 26-bit ARM; teqp writes it)
		ldrb	r2, [r1]		@ fetch old byte
		strb	r0, [r1]		@ store new byte
		mov	r0, r2			@ return the old value
		movs	pc, lr			@ return, restoring the caller's
						@ PSR (re-enables IRQs)

_arm3_xchg_1:	swpb	r0, r0, [r1]		@ ARM3 has an atomic byte swap
		movs	pc, lr
257
/*
 * Function: *_xchg_4 (int new, volatile void *ptr)
 * Params  : new	New value to store at...
 *	   : ptr	pointer to word-wide location
 * Purpose : Performs an exchange operation
 * Returns : Original word data at 'ptr'
 */
_arm2_xchg_4:	mov	r2, pc			@ ARM2 has no SWP: fake atomicity by
		orr	r2, r2, #PSR_I_BIT	@ disabling IRQs (the PSR shares the
		teqp	r2, #0			@ PC on 26-bit ARM; teqp writes it)
		ldr	r2, [r1]		@ fetch old word
		str	r0, [r1]		@ store new word
		mov	r0, r2			@ return the old value
		movs	pc, lr			@ return, restoring the caller's PSR

_arm3_xchg_4:	swp	r0, r0, [r1]		@ ARM3 has an atomic word swap
		movs	pc, lr
275
/*
 * check_bugs for both ARM2 and ARM3: nothing to check; just return
 * with FIQs enabled.
 */
_arm2_3_check_bugs:
		bics	pc, lr, #PSR_F_BIT	@ Clear FIQ disable bit

armvlsi_name:	.asciz	"ARM/VLSI"		@ manufacturer string
_arm2_name:	.asciz	"ARM 2"			@ per-CPU name strings, referenced
_arm250_name:	.asciz	"ARM 250"		@ by the cpu_*_info records below
_arm3_name:	.asciz	"ARM 3"
283
	.section ".init.text", #alloc, #execinstr
/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 *
 * Each table lists, in order: check_bugs, proc_init, proc_fin, set_pgd,
 * xchg_1, xchg_4.  NOTE(review): this order must match the C-side
 * processor-functions structure -- verify against its declaration.
 */
	.globl	arm2_processor_functions
arm2_processor_functions:
	.word	_arm2_3_check_bugs
	.word	_arm2_proc_init
	.word	_arm2_proc_fin
	.word	_arm2_set_pgd
	.word	_arm2_xchg_1		@ IRQ-disable variant: ARM2 has no SWP
	.word	_arm2_xchg_4

cpu_arm2_info:				@ { manufacturer name, CPU name }
	.long	armvlsi_name
	.long	_arm2_name

	.globl	arm250_processor_functions
arm250_processor_functions:
	.word	_arm2_3_check_bugs
	.word	_arm2_proc_init		@ ARM2-style (uncached) core ops...
	.word	_arm2_proc_fin
	.word	_arm2_set_pgd
	.word	_arm3_xchg_1		@ ...but the SWP-based exchange ops
	.word	_arm3_xchg_4

cpu_arm250_info:
	.long	armvlsi_name
	.long	_arm250_name

	.globl	arm3_processor_functions
arm3_processor_functions:
	.word	_arm2_3_check_bugs
	.word	_arm3_proc_init		@ cache set-up / tear-down
	.word	_arm3_proc_fin
	.word	_arm3_set_pgd		@ flushes the cache before updating
	.word	_arm3_xchg_1
	.word	_arm3_xchg_4

cpu_arm3_info:
	.long	armvlsi_name
	.long	_arm3_name
327
arm2_arch_name:		.asciz	"armv1"	@ architecture / ELF names reported
arm3_arch_name:		.asciz	"armv2"	@ for each CPU below
arm2_elf_name:		.asciz	"v1"
arm3_elf_name:		.asciz	"v2"
	.align

	.section ".proc.info", #alloc, #execinstr

	@ One record per supported CPU, matched by (id & mask):
	@   { cpu id, id mask, arch name, elf name, 0, cpu info, proc fns }
	@ NOTE(review): field meanings inferred from the values; confirm
	@ against the arm26 proc_info record layout.
	.long	0x41560200		@ ARM 2
	.long	0xfffffff0
	.long	arm2_arch_name
	.long	arm2_elf_name
	.long	0
	.long	cpu_arm2_info
	.long	arm2_processor_functions

	.long	0x41560250		@ ARM 250
	.long	0xfffffff0
	.long	arm3_arch_name		@ reported as armv2, like ARM 3
	.long	arm3_elf_name
	.long	0
	.long	cpu_arm250_info
	.long	arm250_processor_functions

	.long	0x41560300		@ ARM 3
	.long	0xfffffff0
	.long	arm3_arch_name
	.long	arm3_elf_name
	.long	0
	.long	cpu_arm3_info
	.long	arm3_processor_functions
359
diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c
deleted file mode 100644
index 30447106c25f..000000000000
--- a/arch/arm26/mm/small_page.c
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * linux/arch/arm26/mm/small_page.c
3 *
4 * Copyright (C) 1996 Russell King
5 * Copyright (C) 2003 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Changelog:
12 * 26/01/1996 RMK Cleaned up various areas to make little more generic
13 * 07/02/1999 RMK Support added for 16K and 32K page sizes
14 * containing 8K blocks
15 * 23/05/2004 IM Fixed to use struct page->lru (thanks wli)
16 *
17 */
18#include <linux/signal.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/string.h>
23#include <linux/types.h>
24#include <linux/ptrace.h>
25#include <linux/mman.h>
26#include <linux/mm.h>
27#include <linux/swap.h>
28#include <linux/smp.h>
29#include <linux/bitops.h>
30
31#include <asm/pgtable.h>
32
33#define PEDANTIC
34
/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The memory
 *  management system then leaves the page's remaining struct page
 *  fields free for our own use.
 *
 *  We then use a bitstring in the "index" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages with free segments, linked through
 *  the "lru" entry of struct page.
 */
55
/*
 * Descriptor for one sub-page block size.  Pages that still contain at
 * least one free block of this size are linked on 'queue'.
 */
struct order {
	struct list_head queue;		/* pages with free blocks of this size */
	unsigned int mask;		/* (1 << shift) - 1 */
	unsigned int shift;		/* (1 << shift) size of page */
	unsigned int block_mask;	/* nr_blocks - 1 */
	unsigned int all_used;		/* (1 << nr_blocks) - 1 */
};
63
64
/*
 * Supported sub-page block sizes.  With a 32K PAGE_SIZE:
 *   orders[0]: 2K blocks (shift 11), 16 per page, all_used = 0xffff
 *   orders[1]: 8K blocks (shift 13),  4 per page, all_used = 0x000f
 */
static struct order orders[] = {
#if PAGE_SIZE == 32768
	{ LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
	{ LIST_HEAD_INIT(orders[1].queue), 8191, 13, 3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};
73
/* The page's used-block bitmap is kept in the otherwise-unused page->index. */
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

/* Protects USED_MAP() and every orders[].queue list. */
static DEFINE_SPINLOCK(small_page_lock);
79
80static unsigned long __get_small_page(int priority, struct order *order)
81{
82 unsigned long flags;
83 struct page *page;
84 int offset;
85
86 do {
87 spin_lock_irqsave(&small_page_lock, flags);
88
89 if (list_empty(&order->queue))
90 goto need_new_page;
91
92 page = list_entry(order->queue.next, struct page, lru);
93again:
94#ifdef PEDANTIC
95 BUG_ON(USED_MAP(page) & ~order->all_used);
96#endif
97 offset = ffz(USED_MAP(page));
98 SET_USED(page, offset);
99 if (USED_MAP(page) == order->all_used)
100 list_del_init(&page->lru);
101 spin_unlock_irqrestore(&small_page_lock, flags);
102
103 return (unsigned long) page_address(page) + (offset << order->shift);
104
105need_new_page:
106 spin_unlock_irqrestore(&small_page_lock, flags);
107 page = alloc_page(priority);
108 spin_lock_irqsave(&small_page_lock, flags);
109
110 if (list_empty(&order->queue)) {
111 if (!page)
112 goto no_page;
113 SetPageReserved(page);
114 USED_MAP(page) = 0;
115 list_add(&page->lru, &order->queue);
116 goto again;
117 }
118
119 spin_unlock_irqrestore(&small_page_lock, flags);
120 __free_page(page);
121 } while (1);
122
123no_page:
124 spin_unlock_irqrestore(&small_page_lock, flags);
125 return 0;
126}
127
/*
 * Free a block previously returned by __get_small_page().
 * @spage: virtual address of the block
 * @order: the size class it was allocated from
 *
 * Diagnoses (by printk) attempts to free an address that is not a small
 * page of this order, or a block that is already free.  An address that
 * fails virt_addr_valid() is silently ignored.
 */
static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	struct page *page;

	if (virt_addr_valid(spage)) {
		page = virt_to_page(spage);

		/*
		 * The container-page must be marked Reserved
		 * (set by __get_small_page); a misaligned address cannot
		 * be a block start either.
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		BUG_ON(USED_MAP(page) & ~order->all_used);
#endif

		/* Convert the address into the block index within the page. */
		spage = spage >> order->shift;
		spage &= order->block_mask;

		/*
		 * the following must be atomic wrt get_page
		 */
		spin_lock_irqsave(&small_page_lock, flags);

		/* Page was full (off the queue); it gains a free block now. */
		if (USED_MAP(page) == order->all_used)
			list_add(&page->lru, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		/* Last block freed: release the whole page. */
		if (USED_MAP(page) == 0)
			goto free_page;

		spin_unlock_irqrestore(&small_page_lock, flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	list_del_init(&page->lru);
	spin_unlock_irqrestore(&small_page_lock, flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
	return;
already_free:
	printk("Trying to free free small page from %p\n", __builtin_return_address(0));
}
183
184unsigned long get_page_8k(int priority)
185{
186 return __get_small_page(priority, orders+1);
187}
188
189void free_page_8k(unsigned long spage)
190{
191 __free_small_page(spage, orders+1);
192}