Diffstat (limited to 'arch/arm/mm')

-rw-r--r--  arch/arm/mm/init.c    | 480
-rw-r--r--  arch/arm/mm/ioremap.c |   1
-rw-r--r--  arch/arm/mm/mm-armv.c | 172

3 files changed, 291 insertions(+), 362 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index edffa47a4b2a..f4496813615a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/init.c
  *
- *  Copyright (C) 1995-2002 Russell King
+ *  Copyright (C) 1995-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -86,14 +86,19 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-struct node_info {
-	unsigned int start;
-	unsigned int end;
-	int bootmap_pages;
-};
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
 
-#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
+#define for_each_nodebank(iter,mi,no)			\
+	for (iter = 0; iter < mi->nr_banks; iter++)	\
+		if (mi->bank[iter].node == no)
 
 /*
  * FIXME: We really want to avoid allocating the bootmap bitmap
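The for_each_nodebank() macro introduced here replaces several open-coded scans over mi->bank[] later in this patch. Below is a minimal standalone model of the idiom; the meminfo/membank layout and the bank values are simplified stand-ins for illustration, not the kernel's definitions:

    /* Standalone model of the for_each_nodebank idiom. */
    #include <stdio.h>

    struct membank { unsigned long start, size; int node; };
    struct meminfo { int nr_banks; struct membank bank[4]; };

    #define for_each_nodebank(iter, mi, no)			\
    	for (iter = 0; iter < (mi)->nr_banks; iter++)	\
    		if ((mi)->bank[iter].node == no)

    int main(void)
    {
    	struct meminfo mi = {
    		.nr_banks = 3,
    		.bank = {
    			{ 0x00000000, 0x4000000, 0 },
    			{ 0x10000000, 0x4000000, 1 },
    			{ 0x08000000, 0x4000000, 0 },
    		},
    	};
    	int i;

    	/* Visits banks 0 and 2 only: the banks that live on node 0. */
    	for_each_nodebank(i, &mi, 0)
    		printf("bank %d: start=%#lx size=%#lx\n",
    		       i, mi.bank[i].start, mi.bank[i].size);
    	return 0;
    }

Because the macro expands to a for followed by a bare if, an else after the body would bind to that hidden if; the callers in this patch always use it with a plain statement or block, so the dangler never bites.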
@@ -106,15 +111,12 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
 	unsigned int start_pfn, bank, bootmap_pfn;
 
-	start_pfn   = O_PFN_UP(__pa(&_end));
+	start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
 	bootmap_pfn = 0;
 
-	for (bank = 0; bank < mi->nr_banks; bank ++) {
+	for_each_nodebank(bank, mi, node) {
 		unsigned int start, end;
 
-		if (mi->bank[bank].node != node)
-			continue;
-
 		start = mi->bank[bank].start >> PAGE_SHIFT;
 		end   = (mi->bank[bank].size +
 			 mi->bank[bank].start) >> PAGE_SHIFT;
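With O_PFN_UP() gone, the round-up-to-pfn arithmetic is now spelled out at its remaining call site. A self-contained sketch of that arithmetic, with PAGE_ALIGN redefined locally and a hypothetical end-of-kernel address standing in for __pa(&_end):

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
    	unsigned long end_of_kernel = 0x0030a123;	/* hypothetical __pa(&_end) */

    	/* Round up to the next page boundary, then convert to a pfn. */
    	unsigned long start_pfn = PAGE_ALIGN(end_of_kernel) >> PAGE_SHIFT;

    	printf("first free pfn: %#lx\n", start_pfn);	/* prints 0x30b */
    	return 0;
    }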
@@ -140,92 +142,6 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 	return bootmap_pfn;
 }
 
-/*
- * Scan the memory info structure and pull out:
- *  - the end of memory
- *  - the number of nodes
- *  - the pfn range of each node
- *  - the number of bootmem bitmap pages
- */
-static unsigned int __init
-find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
-{
-	unsigned int i, bootmem_pages = 0, memend_pfn = 0;
-
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		np[i].start = -1U;
-		np[i].end = 0;
-		np[i].bootmap_pages = 0;
-	}
-
-	for (i = 0; i < mi->nr_banks; i++) {
-		unsigned long start, end;
-		int node;
-
-		if (mi->bank[i].size == 0) {
-			/*
-			 * Mark this bank with an invalid node number
-			 */
-			mi->bank[i].node = -1;
-			continue;
-		}
-
-		node = mi->bank[i].node;
-
-		/*
-		 * Make sure we haven't exceeded the maximum number of nodes
-		 * that we have in this configuration.  If we have, we're in
-		 * trouble.  (maybe we ought to limit, instead of bugging?)
-		 */
-		if (node >= MAX_NUMNODES)
-			BUG();
-		node_set_online(node);
-
-		/*
-		 * Get the start and end pfns for this bank
-		 */
-		start = mi->bank[i].start >> PAGE_SHIFT;
-		end   = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
-
-		if (np[node].start > start)
-			np[node].start = start;
-
-		if (np[node].end < end)
-			np[node].end = end;
-
-		if (memend_pfn < end)
-			memend_pfn = end;
-	}
-
-	/*
-	 * Calculate the number of pages we require to
-	 * store the bootmem bitmaps.
-	 */
-	for_each_online_node(i) {
-		if (np[i].end == 0)
-			continue;
-
-		np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
-							    np[i].start);
-		bootmem_pages += np[i].bootmap_pages;
-	}
-
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
-
-	/*
-	 * This doesn't seem to be used by the Linux memory
-	 * manager any more.  If we can get rid of it, we
-	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number
-	 * of _pages_ in the system, not the maximum PFN.
-	 */
-	max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
-	max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
-
-	return bootmem_pages;
-}
-
 static int __init check_initrd(struct meminfo *mi)
 {
 	int initrd_node = -2;
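The scanner deleted above does not simply vanish: its per-node pfn-range and hole accounting reappears inside bootmem_init_node() later in this patch. A self-contained model of that arithmetic over a hypothetical two-bank node (bank addresses and sizes invented for the example):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct bank { unsigned long start, size; };

    int main(void)
    {
    	struct bank b[2] = {
    		{ 0x00000000, 0x04000000 },	/* 64MB at 0 */
    		{ 0x08000000, 0x04000000 },	/* 64MB at 128MB: leaves a 64MB gap */
    	};
    	unsigned long start_pfn = -1UL, end_pfn = 0, zhole;
    	int i;

    	/* Node spans from the lowest bank start to the highest bank end. */
    	for (i = 0; i < 2; i++) {
    		unsigned long s = b[i].start >> PAGE_SHIFT;
    		unsigned long e = (b[i].start + b[i].size) >> PAGE_SHIFT;
    		if (start_pfn > s) start_pfn = s;
    		if (end_pfn < e)   end_pfn = e;
    	}

    	/* holes = node span - sum of bank sizes, in pages */
    	zhole = end_pfn - start_pfn;
    	for (i = 0; i < 2; i++)
    		zhole -= b[i].size >> PAGE_SHIFT;

    	printf("pfns %#lx-%#lx, hole pages %#lx\n", start_pfn, end_pfn, zhole);
    	return 0;
    }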
@@ -266,9 +182,8 @@ static int __init check_initrd(struct meminfo *mi)
 /*
  * Reserve the various regions of node 0
  */
-static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
+static __init void reserve_node_zero(pg_data_t *pgdat)
 {
-	pg_data_t *pgdat = NODE_DATA(0);
 	unsigned long res_size = 0;
 
 	/*
@@ -289,13 +204,6 @@ static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
 			     PTRS_PER_PGD * sizeof(pgd_t));
 
 	/*
-	 * And don't forget to reserve the allocator bitmap,
-	 * which will be freed later.
-	 */
-	reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
-			     bootmap_pages << PAGE_SHIFT);
-
-	/*
 	 * Hmm... This should go elsewhere, but we really really need to
 	 * stop things allocating the low memory; ideally we need a better
 	 * implementation of GFP_DMA which does not assume that DMA-able
@@ -324,183 +232,276 @@ static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int boot
 	reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
 }
 
-/*
- * Register all available RAM in this node with the bootmem allocator.
- */
-static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
+void __init build_mem_type_table(void);
+void __init create_mapping(struct map_desc *md);
+
+static unsigned long __init
+bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 {
-	pg_data_t *pgdat = NODE_DATA(node);
-	int bank;
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	unsigned long start_pfn, end_pfn, boot_pfn;
+	unsigned int boot_pages;
+	pg_data_t *pgdat;
+	int i;
 
-	for (bank = 0; bank < mi->nr_banks; bank++)
-		if (mi->bank[bank].node == node)
-			free_bootmem_node(pgdat, mi->bank[bank].start,
-					  mi->bank[bank].size);
-}
+	start_pfn = -1UL;
+	end_pfn = 0;
 
-/*
- * Initialise the bootmem allocator for all nodes.  This is called
- * early during the architecture specific initialisation.
- */
-static void __init bootmem_init(struct meminfo *mi)
-{
-	struct node_info node_info[MAX_NUMNODES], *np = node_info;
-	unsigned int bootmap_pages, bootmap_pfn, map_pg;
-	int node, initrd_node;
+	/*
+	 * Calculate the pfn range, and map the memory banks for this node.
+	 */
+	for_each_nodebank(i, mi, node) {
+		unsigned long start, end;
+		struct map_desc map;
 
-	bootmap_pages = find_memend_and_nodes(mi, np);
-	bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
-	initrd_node   = check_initrd(mi);
+		start = mi->bank[i].start >> PAGE_SHIFT;
+		end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
 
-	map_pg = bootmap_pfn;
+		if (start_pfn > start)
+			start_pfn = start;
+		if (end_pfn < end)
+			end_pfn = end;
+
+		map.pfn = __phys_to_pfn(mi->bank[i].start);
+		map.virtual = __phys_to_virt(mi->bank[i].start);
+		map.length = mi->bank[i].size;
+		map.type = MT_MEMORY;
+
+		create_mapping(&map);
+	}
 
 	/*
-	 * Initialise the bootmem nodes.
-	 *
-	 * What we really want to do is:
-	 *
-	 *   unmap_all_regions_except_kernel();
-	 *   for_each_node_in_reverse_order(node) {
-	 *     map_node(node);
-	 *     allocate_bootmem_map(node);
-	 *     init_bootmem_node(node);
-	 *     free_bootmem_node(node);
-	 *   }
-	 *
-	 * but this is a 2.5-type change.  For now, we just set
-	 * the nodes up in reverse order.
-	 *
-	 * (we could also do with rolling bootmem_init and paging_init
-	 * into one generic "memory_init" type function).
+	 * If there is no memory in this node, ignore it.
 	 */
-	np += num_online_nodes() - 1;
-	for (node = num_online_nodes() - 1; node >= 0; node--, np--) {
-		/*
-		 * If there are no pages in this node, ignore it.
-		 * Note that node 0 must always have some pages.
-		 */
-		if (np->end == 0 || !node_online(node)) {
-			if (node == 0)
-				BUG();
-			continue;
-		}
+	if (end_pfn == 0)
+		return end_pfn;
 
-		/*
-		 * Initialise the bootmem allocator.
-		 */
-		init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
-		free_bootmem_node_bank(node, mi);
-		map_pg += np->bootmap_pages;
-
-		/*
-		 * If this is node 0, we need to reserve some areas ASAP -
-		 * we may use bootmem on node 0 to setup the other nodes.
-		 */
-		if (node == 0)
-			reserve_node_zero(bootmap_pfn, bootmap_pages);
-	}
+	/*
+	 * Allocate the bootmem bitmap page.
+	 */
+	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+
+	/*
+	 * Initialise the bootmem allocator for this node, handing the
+	 * memory banks over to bootmem.
+	 */
+	node_set_online(node);
+	pgdat = NODE_DATA(node);
+	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
 
+	for_each_nodebank(i, mi, node)
+		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+
+	/*
+	 * Reserve the bootmem bitmap for this node.
+	 */
+	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
+			     boot_pages << PAGE_SHIFT);
 
 #ifdef CONFIG_BLK_DEV_INITRD
-	if (phys_initrd_size && initrd_node >= 0) {
-		reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
+	/*
+	 * If the initrd is in this node, reserve its memory.
+	 */
+	if (node == initrd_node) {
+		reserve_bootmem_node(pgdat, phys_initrd_start,
 				     phys_initrd_size);
 		initrd_start = __phys_to_virt(phys_initrd_start);
 		initrd_end = initrd_start + phys_initrd_size;
 	}
 #endif
 
-	BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
+	/*
+	 * Finally, reserve any node zero regions.
+	 */
+	if (node == 0)
+		reserve_node_zero(pgdat);
+
+	/*
+	 * initialise the zones within this node.
+	 */
+	memset(zone_size, 0, sizeof(zone_size));
+	memset(zhole_size, 0, sizeof(zhole_size));
+
+	/*
+	 * The size of this node has already been determined.  If we need
+	 * to do anything fancy with the allocation of this memory to the
+	 * zones, now is the time to do it.
+	 */
+	zone_size[0] = end_pfn - start_pfn;
+
+	/*
+	 * For each bank in this node, calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes_in_node)
+	 */
+	zhole_size[0] = zone_size[0];
+	for_each_nodebank(i, mi, node)
+		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+
+	/*
+	 * Adjust the sizes according to any special requirements for
+	 * this machine type.
+	 */
+	arch_adjust_zones(node, zone_size, zhole_size);
+
+	free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);
+
+	return end_pfn;
 }
 
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
- */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+static void __init bootmem_init(struct meminfo *mi)
 {
-	void *zero_page;
-	int node;
+	unsigned long addr, memend_pfn = 0;
+	int node, initrd_node, i;
 
-	bootmem_init(mi);
+	/*
+	 * Invalidate the node number for empty or invalid memory banks
+	 */
+	for (i = 0; i < mi->nr_banks; i++)
+		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
+			mi->bank[i].node = -1;
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
+#ifdef CONFIG_XIP_KERNEL
+#error needs fixing
+	p->pfn        = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PMD_MASK);
+	p->virtual    = (unsigned long)&_stext & PMD_MASK;
+	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
+	p->type       = MT_ROM;
+	p ++;
+#endif
+
 	/*
-	 * allocate the zero page.  Note that we count on this going ok.
+	 * Clear out all the mappings below the kernel image.
+	 * FIXME: what about XIP?
 	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	for (addr = 0; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
 
 	/*
-	 * initialise the page tables.
+	 * Clear out all the kernel space mappings, except for the first
+	 * memory bank, up to the end of the vmalloc region.
 	 */
-	memtable_init(mi);
-	if (mdesc->map_io)
-		mdesc->map_io();
-	local_flush_tlb_all();
+	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	     addr < VMALLOC_END; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
 
 	/*
-	 * initialise the zones within each node
+	 * Locate which node contains the ramdisk image, if any.
 	 */
-	for_each_online_node(node) {
-		unsigned long zone_size[MAX_NR_ZONES];
-		unsigned long zhole_size[MAX_NR_ZONES];
-		struct bootmem_data *bdata;
-		pg_data_t *pgdat;
-		int i;
+	initrd_node = check_initrd(mi);
 
-		/*
-		 * Initialise the zone size information.
-		 */
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			zone_size[i]  = 0;
-			zhole_size[i] = 0;
-		}
+	/*
+	 * Run through each node initialising the bootmem allocator.
+	 */
+	for_each_node(node) {
+		unsigned long end_pfn;
 
-		pgdat = NODE_DATA(node);
-		bdata = pgdat->bdata;
+		end_pfn = bootmem_init_node(node, initrd_node, mi);
 
 		/*
-		 * The size of this node has already been determined.
-		 * If we need to do anything fancy with the allocation
-		 * of this memory to the zones, now is the time to do
-		 * it.
+		 * Remember the highest memory PFN.
 		 */
-		zone_size[0] = bdata->node_low_pfn -
-				(bdata->node_boot_start >> PAGE_SHIFT);
+		if (end_pfn > memend_pfn)
+			memend_pfn = end_pfn;
+	}
 
-		/*
-		 * If this zone has zero size, skip it.
-		 */
-		if (!zone_size[0])
-			continue;
+	high_memory = __va(memend_pfn << PAGE_SHIFT);
 
-		/*
-		 * For each bank in this node, calculate the size of the
-		 * holes.  holes = node_size - sum(bank_sizes_in_node)
-		 */
-		zhole_size[0] = zone_size[0];
-		for (i = 0; i < mi->nr_banks; i++) {
-			if (mi->bank[i].node != node)
-				continue;
+	/*
+	 * This doesn't seem to be used by the Linux memory manager any
+	 * more, but is used by ll_rw_block.  If we can get rid of it, we
+	 * also get rid of some of the stuff above as well.
+	 *
+	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+	 * the system, not the maximum PFN.
+	 */
+	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+}
 
-			zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
-		}
+/*
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function.  (Do it by code inspection!)
+ */
+static void __init devicemaps_init(struct machine_desc *mdesc)
+{
+	struct map_desc map;
+	unsigned long addr;
+	void *vectors;
 
-		/*
-		 * Adjust the sizes according to any special
-		 * requirements for this machine type.
-		 */
-		arch_adjust_zones(node, zone_size, zhole_size);
+	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
 
-		free_area_init_node(node, pgdat, zone_size,
-			bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
+	/*
+	 * Map the cache flushing regions.
+	 */
+#ifdef FLUSH_BASE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+	map.virtual = FLUSH_BASE;
+	map.length = PGDIR_SIZE;
+	map.type = MT_CACHECLEAN;
+	create_mapping(&map);
+#endif
+#ifdef FLUSH_BASE_MINICACHE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + PGDIR_SIZE);
+	map.virtual = FLUSH_BASE_MINICACHE;
+	map.length = PGDIR_SIZE;
+	map.type = MT_MINICLEAN;
+	create_mapping(&map);
+#endif
+
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
+	/*
+	 * Create a mapping for the machine vectors at the high-vectors
+	 * location (0xffff0000).  If we aren't using high-vectors, also
+	 * create a mapping at the low-vectors virtual address.
+	 */
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.virtual = 0xffff0000;
+	map.length = PAGE_SIZE;
+	map.type = MT_HIGH_VECTORS;
+	create_mapping(&map);
+
+	if (!vectors_high()) {
+		map.virtual = 0;
+		map.type = MT_LOW_VECTORS;
+		create_mapping(&map);
 	}
 
 	/*
-	 * finish off the bad pages once
-	 * the mem_map is initialised
+	 * Ask the machine support to map in the statically mapped devices.
+	 * After this point, we can start to touch devices again.
+	 */
+	if (mdesc->map_io)
+		mdesc->map_io();
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+	void *zero_page;
+
+	build_mem_type_table();
+	bootmem_init(mi);
+	devicemaps_init(mdesc);
+
+	top_pmd = pmd_off_k(0xffff0000);
+
+	/*
+	 * allocate the zero page.  Note that we count on this going ok.
 	 */
+	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
 	memzero(zero_page, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
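After this hunk, paging_init() is a thin sequencer: memory-type fixup, per-node bootmem and zone setup (which now also creates the MT_MEMORY mappings), then device mappings. A stub-based sketch of just the resulting call order follows; the stubs are placeholders for the kernel functions, not their implementations:

    #include <stdio.h>

    static void build_mem_type_table(void) { puts("1. fix up mem_types for this CPU"); }
    static void bootmem_init(void)         { puts("2. map RAM banks, init bootmem + zones per node"); }
    static void devicemaps_init(void)      { puts("3. clear vmalloc space, map flush regions, vectors, devices"); }

    static void paging_init(void)
    {
    	build_mem_type_table();	/* previously called from memtable_init() */
    	bootmem_init();		/* now also creates the MT_MEMORY mappings */
    	devicemaps_init();	/* replaces the device half of memtable_init() */
    }

    int main(void) { paging_init(); return 0; }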
@@ -562,10 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 	 * may not be the case, especially if the user has provided the
 	 * information on the command line.
 	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
+	for_each_nodebank(i, mi, node) {
 		bank_start = mi->bank[i].start >> PAGE_SHIFT;
 		if (bank_start < prev_bank_end) {
 			printk(KERN_ERR "MEM: unordered memory banks.  "
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 7110e54182b1..6fb1258df1b5 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
+#include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index d125a3dc061c..61bc2fa0511e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/mm-armv.c
  *
- *  Copyright (C) 1998-2002 Russell King
+ *  Copyright (C) 1998-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -305,16 +305,6 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
 }
 
-/*
- * Clear any PGD mapping.  On a two-level page table system,
- * the clearance is done by the middle-level functions (pmd)
- * rather than the top-level (pgd) functions.
- */
-static inline void clear_mapping(unsigned long virt)
-{
-	pmd_clear(pmd_off_k(virt));
-}
-
 struct mem_types {
 	unsigned int	prot_pte;
 	unsigned int	prot_l1;
@@ -373,7 +363,7 @@ static struct mem_types mem_types[] __initdata = {
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init build_mem_type_table(void)
+void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
@@ -483,25 +473,25 @@ static void __init build_mem_type_table(void)
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+void __init create_mapping(struct map_desc *md)
 {
 	unsigned long virt, length;
 	int prot_sect, prot_l1, domain;
 	pgprot_t prot_pte;
-	long off;
+	unsigned long off = (u32)__pfn_to_phys(md->pfn);
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
-		       "0x%08lx at 0x%08lx in user region\n",
-		       md->physical, md->virtual);
+		       "0x%016llx at 0x%08lx in user region\n",
+		       __pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
+		printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
 		       "overlaps vmalloc space\n",
-		       md->physical, md->virtual);
+		       __pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	domain	  = mem_types[md->type].domain;
@@ -509,15 +499,40 @@ static void __init create_mapping(struct map_desc *md)
 	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
 	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
 
+	/*
+	 * Catch 36-bit addresses
+	 */
+	if(md->pfn >= 0x100000) {
+		if(domain) {
+			printk(KERN_ERR "MM: invalid domain in supersection "
+				"mapping for 0x%016llx at 0x%08lx\n",
+				__pfn_to_phys((u64)md->pfn), md->virtual);
+			return;
+		}
+		if((md->virtual | md->length | __pfn_to_phys(md->pfn))
+			& ~SUPERSECTION_MASK) {
+			printk(KERN_ERR "MM: cannot create mapping for "
+				"0x%016llx at 0x%08lx invalid alignment\n",
+				__pfn_to_phys((u64)md->pfn), md->virtual);
+			return;
+		}
+
+		/*
+		 * Shift bits [35:32] of address into bits [23:20] of PMD
+		 * (See ARMv6 spec).
+		 */
+		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+	}
+
 	virt   = md->virtual;
-	off    = md->physical - virt;
+	off   -= virt;
 	length = md->length;
 
 	if (mem_types[md->type].prot_l1 == 0 &&
 	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       md->physical, md->virtual);
+		       __pfn_to_phys(md->pfn), md->virtual);
 		return;
 	}
 
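The supersection path added here encodes physical address bits [35:32] into bits [23:20] of the offset that is later merged into the section entry. A worked standalone example of that shift for a hypothetical bank above 4GB, assuming a 4K page size (PAGE_SHIFT of 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
    	unsigned long long pfn = 0x340000;	/* phys 0x3_40000000, above 4GB */
    	unsigned int off = (unsigned int)(pfn << PAGE_SHIFT);	/* low 32 bits: 0x40000000 */

    	/* pfn 0x100000 is the 4GB boundary with 4K pages */
    	if (pfn >= 0x100000)
    		off |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;

    	printf("off = %#x\n", off);	/* 0x40300000: bits [23:20] carry the 0x3 */
    	return 0;
    }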
@@ -535,13 +550,22 @@ static void __init create_mapping(struct map_desc *md)
 	 * of the actual domain assignments in use.
 	 */
 	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
-		/* Align to supersection boundary */
-		while ((virt & ~SUPERSECTION_MASK || (virt + off) &
-			~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
-			alloc_init_section(virt, virt + off, prot_sect);
-
-			virt   += (PGDIR_SIZE / 2);
-			length -= (PGDIR_SIZE / 2);
+		/*
+		 * Align to supersection boundary if !high pages.
+		 * High pages have already been checked for proper
+		 * alignment above and they will fail the SUPERSECTION_MASK
+		 * check because of the way the address is encoded into
+		 * offset.
+		 */
+		if (md->pfn <= 0x100000) {
+			while ((virt & ~SUPERSECTION_MASK ||
+			       (virt + off) & ~SUPERSECTION_MASK) &&
+				length >= (PGDIR_SIZE / 2)) {
+				alloc_init_section(virt, virt + off, prot_sect);
+
+				virt   += (PGDIR_SIZE / 2);
+				length -= (PGDIR_SIZE / 2);
+			}
 		}
 
 		while (length >= SUPERSECTION_SIZE) {
@@ -601,100 +625,6 @@ void setup_mm_for_reboot(char mode)
 	}
 }
 
-extern void _stext, _etext;
-
-/*
- * Setup initial mappings.  We use the page we allocated for zero page to hold
- * the mappings, which will get overwritten by the vectors in traps_init().
- * The mappings must be in virtual address order.
- */
-void __init memtable_init(struct meminfo *mi)
-{
-	struct map_desc *init_maps, *p, *q;
-	unsigned long address = 0;
-	int i;
-
-	build_mem_type_table();
-
-	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
-
-#ifdef CONFIG_XIP_KERNEL
-	p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
-	p->virtual    = (unsigned long)&_stext & PMD_MASK;
-	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
-	p->type       = MT_ROM;
-	p ++;
-#endif
-
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0)
-			continue;
-
-		p->physical = mi->bank[i].start;
-		p->virtual  = __phys_to_virt(p->physical);
-		p->length   = mi->bank[i].size;
-		p->type     = MT_MEMORY;
-		p ++;
-	}
-
-#ifdef FLUSH_BASE
-	p->physical = FLUSH_BASE_PHYS;
-	p->virtual  = FLUSH_BASE;
-	p->length   = PGDIR_SIZE;
-	p->type     = MT_CACHECLEAN;
-	p ++;
-#endif
-
-#ifdef FLUSH_BASE_MINICACHE
-	p->physical = FLUSH_BASE_PHYS + PGDIR_SIZE;
-	p->virtual  = FLUSH_BASE_MINICACHE;
-	p->length   = PGDIR_SIZE;
-	p->type     = MT_MINICLEAN;
-	p ++;
-#endif
-
-	/*
-	 * Go through the initial mappings, but clear out any
-	 * pgdir entries that are not in the description.
-	 */
-	q = init_maps;
-	do {
-		if (address < q->virtual || q == p) {
-			clear_mapping(address);
-			address += PGDIR_SIZE;
-		} else {
-			create_mapping(q);
-
-			address = q->virtual + q->length;
-			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;
-
-			q ++;
-		}
-	} while (address != 0);
-
-	/*
-	 * Create a mapping for the machine vectors at the high-vectors
-	 * location (0xffff0000).  If we aren't using high-vectors, also
-	 * create a mapping at the low-vectors virtual address.
-	 */
-	init_maps->physical   = virt_to_phys(init_maps);
-	init_maps->virtual    = 0xffff0000;
-	init_maps->length     = PAGE_SIZE;
-	init_maps->type       = MT_HIGH_VECTORS;
-	create_mapping(init_maps);
-
-	if (!vectors_high()) {
-		init_maps->virtual = 0;
-		init_maps->type = MT_LOW_VECTORS;
-		create_mapping(init_maps);
-	}
-
-	flush_cache_all();
-	local_flush_tlb_all();
-
-	top_pmd = pmd_off_k(0xffff0000);
-}
-
 /*
  * Create the architecture specific mappings
  */
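For reference, the memtable_init() deleted above walked the entire 4GB address space in PGDIR_SIZE steps and relied on 32-bit wraparound to terminate its do/while loop. A standalone model of that termination trick, with an illustrative 2MB PGDIR_SIZE (two 1MB sections per pgd entry, as on ARM with 4K pages):

    #include <stdio.h>
    #include <stdint.h>

    #define PGDIR_SIZE (1U << 21)	/* 2MB */

    int main(void)
    {
    	uint32_t address = 0;
    	unsigned long entries = 0;

    	do {
    		entries++;		/* clear_mapping()/create_mapping() went here */
    		address += PGDIR_SIZE;	/* unsigned add wraps to 0 at 4GB */
    	} while (address != 0);

    	printf("%lu pgd steps\n", entries);	/* 2048 */
    	return 0;
    }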