Diffstat (limited to 'arch/um/kernel/mem.c')
 -rw-r--r--  arch/um/kernel/mem.c | 359
 1 file changed, 359 insertions, 0 deletions
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
new file mode 100644
index 000000000000..f156661781cb
--- /dev/null
+++ b/arch/um/kernel/mem.c
@@ -0,0 +1,359 @@
/*
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/mm.h"
#include "linux/bootmem.h"
#include "linux/swap.h"
#include "linux/highmem.h"
#include "linux/gfp.h"
#include "asm/page.h"
#include "asm/fixmap.h"
#include "asm/pgalloc.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "mem_user.h"
#include "uml_uaccess.h"
#include "os.h"

extern char __binary_start;

/* Changed during early boot */
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[PTRS_PER_PGD];
unsigned long highmem;
int kmalloc_ok = 0;

static unsigned long brk_end;

void unmap_physmem(void)
{
        os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}

static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}

#ifdef CONFIG_HIGHMEM
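/* Hand the highmem pages over to the page allocator: clear each page's
 * bootmem reservation, mark it as highmem, and free it page by page.
 */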
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                set_bit(PG_highmem, &page->flags);
                set_page_count(page, 1);
                __free_page(page);
        }
}
#endif

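/* Finish physical memory setup: map the region between the end of the
 * brk and the start of the UML reserved area, hand it to bootmem, and
 * then release all of low memory (plus any highmem) to the page
 * allocator.
 */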
void mem_init(void)
{
        unsigned long start;

        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;

        /* clear the zero-page */
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* Fill in any hole at the start of the binary */
        start = (unsigned long) &__binary_start & PAGE_MASK;
        if(uml_physmem != start){
                map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
                           1, 1, 0);
        }

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}

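/* Walk the page tables for [start, end) and allocate any missing pte
 * pages from bootmem, installing them in the pmd entries. Only the
 * page-table structure is built here; the callers fill in the ptes.
 */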
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pmd = (pmd_t *)pgd;
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                                   (unsigned long) __pa(pte)));
                                if (pte != pte_offset_kernel(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
                                     (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

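/* Build the page tables for the permanent kmap area, remember the pte
 * page backing it, and cache the first fixmap kmap pte and protection
 * for atomic kmaps.
 */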
static void init_highmem(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */

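/* When the host's vsyscall area is reused, copy its contents into pages
 * allocated from bootmem and map the copies read-only at the same
 * addresses via the fixmap.
 */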
static void __init fixaddr_user_init( void)
{
#if CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr, vaddr = FIXADDR_USER_START;

        if ( ! size )
                return;

        fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        paddr = (unsigned long)alloc_bootmem_low_pages( size);
        memcpy( (void *)paddr, (void *)FIXADDR_USER_START, size);
        paddr = __pa(paddr);
        for ( ; size > 0; size-=PAGE_SIZE, vaddr+=PAGE_SIZE, paddr+=PAGE_SIZE){
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val( (*pte), paddr, PAGE_READONLY);
        }
#endif
}

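/* Set up the zone sizes (low memory and, when configured, highmem),
 * the page tables for the fixmap range, and the kmap areas.
 */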
void paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i = 0; i < sizeof(zones_size)/sizeof(zones_size[0]); i++)
                zones_size[i] = 0;
        zones_size[0] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
        zones_size[2] = highmem >> PAGE_SHIFT;
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}

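/* Make sure the host really has memory behind an allocation by writing
 * to every page under the fault catcher. If a write faults and the
 * caller cannot sleep, give up; otherwise retry with a fresh
 * allocation.
 */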
struct page *arch_validate(struct page *page, int mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if(page == NULL) return(page);
        if(PageHighMem(page)) return(page);

        addr = (unsigned long) page_address(page);
        for(i = 0; i < (1 << order); i++){
                current->thread.fault_addr = (void *) addr;
                if(__do_copy_to_user((void __user *) addr, &zero,
                                     sizeof(zero),
                                     &current->thread.fault_addr,
                                     &current->thread.fault_catcher)){
                        if(!(mask & __GFP_WAIT)) return(NULL);
                        else break;
                }
                addr += PAGE_SIZE;
        }

        if(i == (1 << order)) return(page);
        page = alloc_pages(mask, order);
        goto again;
}

/* This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n",
                        (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}

#endif

void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while(pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if(PageHighMem(page))
                        highmem++;
                if(PageReserved(page))
                        reserved++;
                else if(PageSwapCache(page))
                        cached++;
                else if(page_count(page))
                        shared += page_count(page) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Allocate and free page tables.
 */

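/* A new pgd starts with empty user entries and a copy of the kernel
 * mappings taken from swapper_pg_dir.
 */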
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */