author		Paul Mackerras <paulus@samba.org>	2005-10-05 22:23:33 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-10-05 22:23:33 -0400
commit		7c8c6b9776fb41134d87ef50706a777a45d61cd4
tree		5f3cc71e34bc244d53364e103a9746bfe92da9ae
parent		9b6b563c0d2d25ecc3111916031aa7255543fbfb
powerpc: Merge lmb.c and make MM initialization use it.
This also creates merged versions of do_init_bootmem, paging_init
and mem_init and moves them to arch/powerpc/mm/mem.c. It gets rid
of the mem_pieces stuff.
I made memory_limit a parameter to lmb_enforce_memory_limit rather
than a global referenced by that function. This will require some
small changes to ppc64 if we want to continue building ARCH=ppc64
using the merged lmb.c.
Signed-off-by: Paul Mackerras <paulus@samba.org>
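
To illustrate the interface change described above: lmb_enforce_memory_limit() now takes the limit as an argument instead of reading a global, so a ppc64 caller would be expected to look roughly like the sketch below. The helper and variable names here are hypothetical and not part of this patch; the only hard constraint is the one documented in lmb.c, that lmb_analyze() must be re-run after the truncation.

	/* Hypothetical ppc64 call site after this change (sketch only).
	 * memory_limit stands for whatever the platform parsed from the
	 * mem= command-line option; lmb.c no longer reads it itself. */
	static unsigned long memory_limit;

	static void __init apply_memory_limit(void)
	{
		lmb_enforce_memory_limit(memory_limit);	/* truncate the LMB list */
		lmb_analyze();				/* recompute lmb.memory.size */
	}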
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/mm/Makefile     |   4
 arch/powerpc/mm/init.c       | 365
 arch/powerpc/mm/init64.c     | 126
 arch/powerpc/mm/lmb.c        | 296
 arch/powerpc/mm/mem.c        | 185
 arch/powerpc/mm/mem_pieces.c | 163
 arch/powerpc/mm/mem_pieces.h |  48
 arch/powerpc/mm/mmu_decl.h   |   2
 arch/powerpc/mm/pgtable.c    |   3
 arch/powerpc/mm/ppc_mmu.c    |  17
 10 files changed, 509 insertions(+), 700 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 9f52c26acd86..afd3be112b79 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -2,9 +2,9 @@
 # Makefile for the linux ppc-specific parts of the memory manager.
 #
 
-obj-y				:= fault.o mem.o
+obj-y				:= fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32)		+= init.o pgtable.o mmu_context.o \
-					mem_pieces.o tlb.o
+					tlb.o
 obj-$(CONFIG_PPC64)		+= init64.o pgtable64.o mmu_context64.o
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu.o hash_32.o
 obj-$(CONFIG_40x)		+= 4xx_mmu.o
diff --git a/arch/powerpc/mm/init.c b/arch/powerpc/mm/init.c
index 3a81ef15c67e..bf13c14e66b3 100644
--- a/arch/powerpc/mm/init.c
+++ b/arch/powerpc/mm/init.c
@@ -45,8 +45,9 @@
 #include <asm/tlb.h>
 #include <asm/bootinfo.h>
 #include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
 
-#include "mem_pieces.h"
 #include "mmu_decl.h"
 
 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
@@ -65,17 +66,11 @@ unsigned long total_lowmem;
 unsigned long ppc_memstart;
 unsigned long ppc_memoffset = PAGE_OFFSET;
 
-int mem_init_done;
-int init_bootmem_done;
 int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
 unsigned long agp_special_page;
 #endif
 
-extern char _end[];
-extern char etext[], _stext[];
-extern char __init_begin, __init_end;
-
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
@@ -85,15 +80,15 @@ EXPORT_SYMBOL(kmap_pte);
 #endif
 
 void MMU_init(void);
-void set_phys_avail(unsigned long total_ram);
 
 /* XXX should be in current.h -- paulus */
 extern struct task_struct *current_set[NR_CPUS];
 
 char *klimit = _end;
-struct mem_pieces phys_avail;
 struct device_node *memory_node;
 
+extern int init_bootmem_done;
+
 /*
  * this tells the system to map all of ram with the segregs
  * (i.e. page tables) instead of the bats.
@@ -102,84 +97,14 @@ struct device_node *memory_node;
 int __map_without_bats;
 int __map_without_ltlbs;
 
-/* max amount of RAM to use */
-unsigned long __max_memory;
 /* max amount of low RAM to map in */
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * Read in a property describing some pieces of memory.
+ * limit of what is accessible with initial MMU setup -
+ * 256MB usually, but only 16MB on 601.
  */
-static int __init get_mem_prop(char *name, struct mem_pieces *mp)
-{
-	struct reg_property *rp;
-	int i, s;
-	unsigned int *ip;
-	int nac = prom_n_addr_cells(memory_node);
-	int nsc = prom_n_size_cells(memory_node);
-
-	ip = (unsigned int *) get_property(memory_node, name, &s);
-	if (ip == NULL) {
-		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
-		       name);
-		return 0;
-	}
-	s /= (nsc + nac) * 4;
-	rp = mp->regions;
-	for (i = 0; i < s; ++i, ip += nac+nsc) {
-		if (nac >= 2 && ip[nac-2] != 0)
-			continue;
-		rp->address = ip[nac-1];
-		if (nsc >= 2 && ip[nac+nsc-2] != 0)
-			rp->size = ~0U;
-		else
-			rp->size = ip[nac+nsc-1];
-		++rp;
-	}
-	mp->n_regions = rp - mp->regions;
-
-	/* Make sure the pieces are sorted. */
-	mem_pieces_sort(mp);
-	mem_pieces_coalesce(mp);
-	return 1;
-}
-
-/*
- * Collect information about physical RAM and which pieces are
- * already in use from the device tree.
- */
-unsigned long __init find_end_of_memory(void)
-{
-	unsigned long a, total;
-	struct mem_pieces phys_mem;
-
-	/*
-	 * Find out where physical memory is, and check that it
-	 * starts at 0 and is contiguous.  It seems that RAM is
-	 * always physically contiguous on Power Macintoshes.
-	 *
-	 * Supporting discontiguous physical memory isn't hard,
-	 * it just makes the virtual <-> physical mapping functions
-	 * more complicated (or else you end up wasting space
-	 * in mem_map).
-	 */
-	memory_node = find_devices("memory");
-	if (memory_node == NULL || !get_mem_prop("reg", &phys_mem)
-	    || phys_mem.n_regions == 0)
-		panic("No RAM??");
-	a = phys_mem.regions[0].address;
-	if (a != 0)
-		panic("RAM doesn't start at physical address 0");
-	total = phys_mem.regions[0].size;
-
-	if (phys_mem.n_regions > 1) {
-		printk("RAM starting at 0x%x is not contiguous\n",
-		       phys_mem.regions[1].address);
-		printk("Using RAM from 0 to 0x%lx\n", total-1);
-	}
-
-	return total;
-}
+unsigned long __initial_memory_limit = 0x10000000;
 
 /*
  * Check for command-line options that affect what MMU_init will do.
@@ -194,27 +119,6 @@ void MMU_setup(void)
 	if (strstr(cmd_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
-
-	/* Look for mem= option on command line */
-	if (strstr(cmd_line, "mem=")) {
-		char *p, *q;
-		unsigned long maxmem = 0;
-
-		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
-			q = p + 4;
-			if (p > cmd_line && p[-1] != ' ')
-				continue;
-			maxmem = simple_strtoul(q, &q, 0);
-			if (*q == 'k' || *q == 'K') {
-				maxmem <<= 10;
-				++q;
-			} else if (*q == 'm' || *q == 'M') {
-				maxmem <<= 20;
-				++q;
-			}
-		}
-		__max_memory = maxmem;
-	}
 }
 
 /*
@@ -227,23 +131,22 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);
 
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		__initial_memory_limit = 0x01000000;
+
 	/* parse args from command line */
 	MMU_setup();
 
-	/*
-	 * Figure out how much memory we have, how much
-	 * is lowmem, and how much is highmem.  If we were
-	 * passed the total memory size from the bootloader,
-	 * just use it.
-	 */
-	if (boot_mem_size)
-		total_memory = boot_mem_size;
-	else
-		total_memory = find_end_of_memory();
-
-	if (__max_memory && total_memory > __max_memory)
-		total_memory = __max_memory;
+	if (lmb.memory.cnt > 1) {
+		lmb.memory.cnt = 1;
+		lmb_analyze();
+		printk(KERN_WARNING "Only using first contiguous memory region");
+	}
+
+	total_memory = lmb_end_of_DRAM();
 	total_lowmem = total_memory;
+
 #ifdef CONFIG_FSL_BOOKE
 	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
 	 * entries, so we need to adjust lowmem to match the amount we can map
@@ -256,7 +159,6 @@ void __init MMU_init(void)
 		total_memory = total_lowmem;
 #endif /* CONFIG_HIGHMEM */
 	}
-	set_phys_avail(total_lowmem);
 
 	/* Initialize the MMU hardware */
 	if (ppc_md.progress)
@@ -303,7 +205,8 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
+		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+					__initial_memory_limit));
 	}
 	return p;
 }
@@ -353,229 +256,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
-
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-void __init do_init_bootmem(void)
-{
-	unsigned long start, size;
-	int i;
-
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	start = 0;
-	size = 0;
-	for (i = 0; i < phys_avail.n_regions; ++i) {
-		unsigned long a = phys_avail.regions[i].address;
-		unsigned long s = phys_avail.regions[i].size;
-		if (s <= size)
-			continue;
-		start = a;
-		size = s;
-		if (s >= 33 * PAGE_SIZE)
-			break;
-	}
-	start = PAGE_ALIGN(start);
-
-	min_low_pfn = start >> PAGE_SHIFT;
-	max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
-	max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
-	boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
-					 PPC_MEMSTART >> PAGE_SHIFT,
-					 max_low_pfn);
-
-	/* remove the bootmem bitmap from the available memory */
-	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
-
-	/* add everything in phys_avail into the bootmem map */
-	for (i = 0; i < phys_avail.n_regions; ++i)
-		free_bootmem(phys_avail.regions[i].address,
-			     phys_avail.regions[i].size);
-
-	init_bootmem_done = 1;
-}
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-void __init paging_init(void)
-{
-	unsigned long zones_size[MAX_NR_ZONES], i;
-
-#ifdef CONFIG_HIGHMEM
-	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
-	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
-			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
-	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
-	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
-			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
-	kmap_prot = PAGE_KERNEL;
-#endif /* CONFIG_HIGHMEM */
-
-	/*
-	 * All pages are DMA-able so we put them all in the DMA zone.
-	 */
-	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
-	for (i = 1; i < MAX_NR_ZONES; i++)
-		zones_size[i] = 0;
-
-#ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
-
-	free_area_init(zones_size);
-}
-
-void __init mem_init(void)
-{
-	unsigned long addr;
-	int codepages = 0;
-	int datapages = 0;
-	int initpages = 0;
-#ifdef CONFIG_HIGHMEM
-	unsigned long highmem_mapnr;
-
-	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
-	max_mapnr = total_memory >> PAGE_SHIFT;
-
-	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
-	num_physpages = max_mapnr;	/* RAM is assumed contiguous */
-
-	totalram_pages += free_all_bootmem();
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* if we are booted from BootX with an initial ramdisk,
-	   make sure the ramdisk pages aren't reserved. */
-	if (initrd_start) {
-		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
-			ClearPageReserved(virt_to_page(addr));
-	}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-#ifdef CONFIG_PPC_OF
-	/* mark the RTAS pages as reserved */
-	if ( rtas_data )
-		for (addr = (ulong)__va(rtas_data);
-		     addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
-		     addr += PAGE_SIZE)
-			SetPageReserved(virt_to_page(addr));
-#endif
-#ifdef CONFIG_PPC_PMAC
-	if (agp_special_page)
-		SetPageReserved(virt_to_page(agp_special_page));
-#endif
-	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
-	     addr += PAGE_SIZE) {
-		if (!PageReserved(virt_to_page(addr)))
-			continue;
-		if (addr < (ulong) etext)
-			codepages++;
-		else if (addr >= (unsigned long)&__init_begin
-			 && addr < (unsigned long)&__init_end)
-			initpages++;
-		else if (addr < (ulong) klimit)
-			datapages++;
-	}
-
-#ifdef CONFIG_HIGHMEM
-	{
-		unsigned long pfn;
-
-		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
-			struct page *page = mem_map + pfn;
-
-			ClearPageReserved(page);
-			set_page_count(page, 1);
-			__free_page(page);
-			totalhigh_pages++;
-		}
-		totalram_pages += totalhigh_pages;
-	}
-#endif /* CONFIG_HIGHMEM */
-
-	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
-	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
-	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
-	       initpages<< (PAGE_SHIFT-10),
-	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
-
-#ifdef CONFIG_PPC_PMAC
-	if (agp_special_page)
-		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
-#endif
-
-	mem_init_done = 1;
-}
-
-/*
- * Set phys_avail to the amount of physical memory,
- * less the kernel text/data/bss.
- */
-void __init
-set_phys_avail(unsigned long total_memory)
-{
-	unsigned long kstart, ksize;
-
-	/*
-	 * Initially, available physical memory is equivalent to all
-	 * physical memory.
-	 */
-
-	phys_avail.regions[0].address = PPC_MEMSTART;
-	phys_avail.regions[0].size = total_memory;
-	phys_avail.n_regions = 1;
-
-	/*
-	 * Map out the kernel text/data/bss from the available physical
-	 * memory.
-	 */
-
-	kstart = __pa(_stext);	/* should be 0 */
-	ksize = PAGE_ALIGN(klimit - _stext);
-
-	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
-	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
-
-#if defined(CONFIG_BLK_DEV_INITRD)
-	/* Remove the init RAM disk from the available memory. */
-	if (initrd_start) {
-		mem_pieces_remove(&phys_avail, __pa(initrd_start),
-				  initrd_end - initrd_start, 1);
-	}
-#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC_OF
-	/* remove the RTAS pages from the available memory */
-	if (rtas_data)
-		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
-#endif
-#ifdef CONFIG_PPC_PMAC
-	/* Because of some uninorth weirdness, we need a page of
-	 * memory as high as possible (it must be outside of the
-	 * bus address seen as the AGP aperture). It will be used
-	 * by the r128 DRM driver
-	 *
-	 * FIXME: We need to make sure that page doesn't overlap any of the\
-	 * above. This could be done by improving mem_pieces_find to be able
-	 * to do a backward search from the end of the list.
-	 */
-	if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
-		agp_special_page = (total_memory - PAGE_SIZE);
-		mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
-		agp_special_page = (unsigned long)__va(agp_special_page);
-	}
-#endif /* CONFIG_PPC_PMAC */
-}
-
-/* Mark some memory as reserved by removing it from phys_avail. */
-void __init reserve_phys_mem(unsigned long start, unsigned long size)
-{
-	mem_pieces_remove(&phys_avail, start, size, 1);
-}
diff --git a/arch/powerpc/mm/init64.c b/arch/powerpc/mm/init64.c
index 81f6745b31ef..c0ce6a7af3c7 100644
--- a/arch/powerpc/mm/init64.c
+++ b/arch/powerpc/mm/init64.c
@@ -166,77 +166,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-/*
- * Initialize the bootmem system and give it all the memory we
- * have available.
- */
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init do_init_bootmem(void)
-{
-	unsigned long i;
-	unsigned long start, bootmap_pages;
-	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	int boot_mapsize;
-
-	/*
-	 * Find an area to use for the bootmem bitmap.  Calculate the size of
-	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
-	 * Add 1 additional page in case the address isn't page-aligned.
-	 */
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
-	BUG_ON(!start);
-
-	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
-
-	max_pfn = max_low_pfn;
-
-	/* Add all physical memory to the bootmem map, mark each area
-	 * present.
-	 */
-	for (i=0; i < lmb.memory.cnt; i++)
-		free_bootmem(lmb.memory.region[i].base,
-			     lmb_size_bytes(&lmb.memory, i));
-
-	/* reserve the sections we're already using */
-	for (i=0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i));
-
-	for (i=0; i < lmb.memory.cnt; i++)
-		memory_present(0, lmb_start_pfn(&lmb.memory, i),
-			       lmb_end_pfn(&lmb.memory, i));
-}
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-void __init paging_init(void)
-{
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long total_ram = lmb_phys_mem_size();
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
-	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
-	       (top_of_ram - total_ram) >> 20);
-	/*
-	 * All pages are DMA-able so we put them all in the DMA zone.
-	 */
-	memset(zones_size, 0, sizeof(zones_size));
-	memset(zholes_size, 0, sizeof(zholes_size));
-
-	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
-	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-
-	free_area_init_node(0, NODE_DATA(0), zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-}
-#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
-
 static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
@@ -264,61 +193,6 @@ static int __init setup_kcore(void)
 }
 module_init(setup_kcore);
 
-void __init mem_init(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	int nid;
-#endif
-	pg_data_t *pgdat;
-	unsigned long i;
-	struct page *page;
-	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
-
-	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	for_each_online_node(nid) {
-		if (NODE_DATA(nid)->node_spanned_pages != 0) {
-			printk("freeing bootmem node %x\n", nid);
-			totalram_pages +=
-				free_all_bootmem_node(NODE_DATA(nid));
-		}
-	}
-#else
-	max_mapnr = num_physpages;
-	totalram_pages += free_all_bootmem();
-#endif
-
-	for_each_pgdat(pgdat) {
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			page = pgdat_page_nr(pgdat, i);
-			if (PageReserved(page))
-				reservedpages++;
-		}
-	}
-
-	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
-	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
-	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
-	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
-	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
-		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
-		num_physpages << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		bsssize >> 10,
-		initsize >> 10);
-
-	mem_init_done = 1;
-
-	/* Initialize the vDSO */
-	vdso_init();
-}
-
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
 	void __iomem *virt_addr;
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
new file mode 100644
index 000000000000..9b5aa6808eb8
--- /dev/null
+++ b/arch/powerpc/mm/lmb.c
@@ -0,0 +1,296 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp.	June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#ifdef CONFIG_PPC32
+#include "mmu_decl.h"		/* for __max_low_memory */
+#endif
+
+struct lmb lmb;
+
+#undef DEBUG
+
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+	unsigned long i;
+
+	udbg_printf("lmb_dump_all:\n");
+	udbg_printf("    memory.cnt		  = 0x%lx\n",
+		    lmb.memory.cnt);
+	udbg_printf("    memory.size		  = 0x%lx\n",
+		    lmb.memory.size);
+	for (i=0; i < lmb.memory.cnt ;i++) {
+		udbg_printf("    memory.region[0x%x].base       = 0x%lx\n",
+			    i, lmb.memory.region[i].base);
+		udbg_printf("		      .size     = 0x%lx\n",
+			    lmb.memory.region[i].size);
+	}
+
+	udbg_printf("\n    reserved.cnt	  = 0x%lx\n",
+		    lmb.reserved.cnt);
+	udbg_printf("    reserved.size	  = 0x%lx\n",
+		    lmb.reserved.size);
+	for (i=0; i < lmb.reserved.cnt ;i++) {
+		udbg_printf("    reserved.region[0x%x].base       = 0x%lx\n",
+			    i, lmb.reserved.region[i].base);
+		udbg_printf("		      .size     = 0x%lx\n",
+			    lmb.reserved.region[i].size);
+	}
+#endif /* DEBUG */
+}
+
+static unsigned long __init lmb_addrs_overlap(unsigned long base1,
+		unsigned long size1, unsigned long base2, unsigned long size2)
+{
+	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+}
+
+static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+		unsigned long base2, unsigned long size2)
+{
+	if (base2 == base1 + size1)
+		return 1;
+	else if (base1 == base2 + size2)
+		return -1;
+
+	return 0;
+}
+
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	unsigned long base1 = rgn->region[r1].base;
+	unsigned long size1 = rgn->region[r1].size;
+	unsigned long base2 = rgn->region[r2].base;
+	unsigned long size2 = rgn->region[r2].size;
+
+	return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	unsigned long i;
+
+	rgn->region[r1].size += rgn->region[r2].size;
+	for (i=r2; i < rgn->cnt-1; i++) {
+		rgn->region[i].base = rgn->region[i+1].base;
+		rgn->region[i].size = rgn->region[i+1].size;
+	}
+	rgn->cnt--;
+}
+
+/* This routine called with relocation disabled. */
+void __init lmb_init(void)
+{
+	/* Create a dummy zero size LMB which will get coalesced away later.
+	 * This simplifies the lmb_add() code below...
+	 */
+	lmb.memory.region[0].base = 0;
+	lmb.memory.region[0].size = 0;
+	lmb.memory.cnt = 1;
+
+	/* Ditto. */
+	lmb.reserved.region[0].base = 0;
+	lmb.reserved.region[0].size = 0;
+	lmb.reserved.cnt = 1;
+}
+
+/* This routine may be called with relocation disabled. */
+void __init lmb_analyze(void)
+{
+	int i;
+
+	lmb.memory.size = 0;
+
+	for (i = 0; i < lmb.memory.cnt; i++)
+		lmb.memory.size += lmb.memory.region[i].size;
+}
+
+/* This routine called with relocation disabled. */
+static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
+				  unsigned long size)
+{
+	unsigned long i, coalesced = 0;
+	long adjacent;
+
+	/* First try and coalesce this LMB with another. */
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+
+		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+		if ( adjacent > 0 ) {
+			rgn->region[i].base -= size;
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+		else if ( adjacent < 0 ) {
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+	}
+
+	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+		lmb_coalesce_regions(rgn, i, i+1);
+		coalesced++;
+	}
+
+	if (coalesced)
+		return coalesced;
+	if (rgn->cnt >= MAX_LMB_REGIONS)
+		return -1;
+
+	/* Couldn't coalesce the LMB, so add it to the sorted table. */
+	for (i = rgn->cnt-1; i >= 0; i--) {
+		if (base < rgn->region[i].base) {
+			rgn->region[i+1].base = rgn->region[i].base;
+			rgn->region[i+1].size = rgn->region[i].size;
+		} else {
+			rgn->region[i+1].base = base;
+			rgn->region[i+1].size = size;
+			break;
+		}
+	}
+	rgn->cnt++;
+
+	return 0;
+}
+
+/* This routine may be called with relocation disabled. */
+long __init lmb_add(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.memory);
+
+	/* On pSeries LPAR systems, the first LMB is our RMO region. */
+	if (base == 0)
+		lmb.rmo_size = size;
+
+	return lmb_add_region(_rgn, base, size);
+
+}
+
+long __init lmb_reserve(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.reserved);
+
+	return lmb_add_region(_rgn, base, size);
+}
+
+long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
+				unsigned long size)
+{
+	unsigned long i;
+
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+			break;
+		}
+	}
+
+	return (i < rgn->cnt) ? i : -1;
+}
+
+unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+{
+	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
+	long i, j;
+	unsigned long base = 0;
+
+#ifdef CONFIG_PPC32
+	/* On 32-bit, make sure we allocate lowmem */
+	if (max_addr == LMB_ALLOC_ANYWHERE)
+		max_addr = __max_low_memory;
+#endif
+	for (i = lmb.memory.cnt-1; i >= 0; i--) {
+		unsigned long lmbbase = lmb.memory.region[i].base;
+		unsigned long lmbsize = lmb.memory.region[i].size;
+
+		if (max_addr == LMB_ALLOC_ANYWHERE)
+			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
+		else if (lmbbase < max_addr) {
+			base = min(lmbbase + lmbsize, max_addr);
+			base = _ALIGN_DOWN(base - size, align);
+		} else
+			continue;
+
+		while ((lmbbase <= base) &&
+		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
+					   align);
+
+		if ((base != 0) && (lmbbase <= base))
+			break;
+	}
+
+	if (i < 0)
+		return 0;
+
+	lmb_add_region(&lmb.reserved, base, size);
+
+	return base;
+}
+
+/* You must call lmb_analyze() before this. */
+unsigned long __init lmb_phys_mem_size(void)
+{
+	return lmb.memory.size;
+}
+
+unsigned long __init lmb_end_of_DRAM(void)
+{
+	int idx = lmb.memory.cnt - 1;
+
+	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/*
+ * Truncate the lmb list to memory_limit if it's set
+ * You must call lmb_analyze() after this.
+ */
+void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+{
+	unsigned long i, limit;
+
+	if (! memory_limit)
+		return;
+
+	limit = memory_limit;
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		if (limit > lmb.memory.region[i].size) {
+			limit -= lmb.memory.region[i].size;
+			continue;
+		}
+
+		lmb.memory.region[i].size = limit;
+		lmb.memory.cnt = i + 1;
+		break;
+	}
+}
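
For orientation, the routines above are meant to be driven in a fixed order during early boot. A minimal sketch of that sequence follows; the region bases and sizes are invented for illustration, and example_lmb_setup() is a hypothetical wrapper, not a function added by this patch.

	/* Hypothetical boot-time usage of the LMB interface above. */
	static void __init example_lmb_setup(void)
	{
		unsigned long bitmap;

		lmb_init();			/* set up the dummy zero-size regions */
		lmb_add(0, 0x10000000);		/* say, 256MB of RAM at physical 0 */
		lmb_analyze();			/* recompute lmb.memory.size */
		lmb_reserve(0, 0x400000);	/* protect e.g. the kernel image */

		/* allocations carve reserved space from the top of memory down */
		bitmap = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
	}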
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 345db08e5d20..0650de74d0b3 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -45,8 +45,9 @@
 #include <asm/tlb.h>
 #include <asm/bootinfo.h>
 #include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/sections.h>
 
-#include "mem_pieces.h"
 #include "mmu_decl.h"
 
 #ifndef CPU_FTR_COHERENT_ICACHE
@@ -54,6 +55,9 @@
 #define CPU_FTR_NOEXECUTE	0
 #endif
 
+int init_bootmem_done;
+int mem_init_done;
+
 /*
  * This is called by /dev/mem to know if a given address has to
  * be mapped non-cacheable or not
@@ -131,6 +135,185 @@ void show_mem(void)
 }
 
 /*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.  If we are using highmem, we only put the
+ * lowmem into the bootmem system.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+void __init do_init_bootmem(void)
+{
+	unsigned long i;
+	unsigned long start, bootmap_pages;
+	unsigned long total_pages;
+	int boot_mapsize;
+
+	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+#ifdef CONFIG_HIGHMEM
+	total_pages = total_lowmem >> PAGE_SHIFT;
+#endif
+
+	/*
+	 * Find an area to use for the bootmem bitmap.  Calculate the size of
+	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
+	 * Add 1 additional page in case the address isn't page-aligned.
+	 */
+	bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	BUG_ON(!start);
+
+	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+
+	/* Add all physical memory to the bootmem map, mark each area
+	 * present.
+	 */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base = lmb.memory.region[i].base;
+		unsigned long size = lmb_size_bytes(&lmb.memory, i);
+#ifdef CONFIG_HIGHMEM
+		if (base >= total_lowmem)
+			continue;
+		if (base + size > total_lowmem)
+			size = total_lowmem - base;
+#endif
+		free_bootmem(base, size);
+	}
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i));
+
+	/* XXX need to clip this if using highmem? */
+	for (i = 0; i < lmb.memory.cnt; i++)
+		memory_present(0, lmb_start_pfn(&lmb.memory, i),
+			       lmb_end_pfn(&lmb.memory, i));
+	init_bootmem_done = 1;
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long zholes_size[MAX_NR_ZONES];
+	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = lmb_end_of_DRAM();
+
+#ifdef CONFIG_HIGHMEM
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
+			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
+	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
+	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
+			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
+	kmap_prot = PAGE_KERNEL;
+#endif /* CONFIG_HIGHMEM */
+
+	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	       top_of_ram, total_ram);
+	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	       (top_of_ram - total_ram) >> 20);
+	/*
+	 * All pages are DMA-able so we put them all in the DMA zone.
+	 */
+	memset(zones_size, 0, sizeof(zones_size));
+	memset(zholes_size, 0, sizeof(zholes_size));
+
+	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+
+#ifdef CONFIG_HIGHMEM
+	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
+	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#else
+	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+#endif /* CONFIG_HIGHMEM */
+
+	free_area_init_node(0, NODE_DATA(0), zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+}
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int nid;
+#endif
+	pg_data_t *pgdat;
+	unsigned long i;
+	struct page *page;
+	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+
+	num_physpages = max_pfn;	/* RAM is assumed contiguous */
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	for_each_online_node(nid) {
+		if (NODE_DATA(nid)->node_spanned_pages != 0) {
+			printk("freeing bootmem node %x\n", nid);
+			totalram_pages +=
+				free_all_bootmem_node(NODE_DATA(nid));
+		}
+	}
+#else
+	max_mapnr = num_physpages;
+	totalram_pages += free_all_bootmem();
+#endif
+	for_each_pgdat(pgdat) {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			page = pgdat_page_nr(pgdat, i);
+			if (PageReserved(page))
+				reservedpages++;
+		}
+	}
+
+	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
+	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
+	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+
+#ifdef CONFIG_HIGHMEM
+	{
+		unsigned long pfn, highmem_mapnr;
+
+		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+			struct page *page = pfn_to_page(pfn);
+
+			ClearPageReserved(page);
+			set_page_count(page, 1);
+			__free_page(page);
+			totalhigh_pages++;
+		}
+		totalram_pages += totalhigh_pages;
+		printk(KERN_INFO "High memory: %luk\n",
+		       totalhigh_pages << (PAGE_SHIFT-10));
+	}
+#endif /* CONFIG_HIGHMEM */
+
+	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
+	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
+		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+		num_physpages << (PAGE_SHIFT-10),
+		codesize >> 10,
+		reservedpages << (PAGE_SHIFT-10),
+		datasize >> 10,
+		bsssize >> 10,
+		initsize >> 10);
+
+	mem_init_done = 1;
+
+#ifdef CONFIG_PPC64
+	/* Initialize the vDSO */
+	vdso_init();
+#endif
+}
+
+/*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean.  We do the i-cache
  * flush later when the page is given to a user process, if necessary.
diff --git a/arch/powerpc/mm/mem_pieces.c b/arch/powerpc/mm/mem_pieces.c
deleted file mode 100644
index 3d639052017e..000000000000
--- a/arch/powerpc/mm/mem_pieces.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
- *   Changes to accommodate Power Macintoshes.
- * Cort Dougan <cort@cs.nmt.edu>
- *   Rewrites.
- * Grant Erickson <grant@lcse.umn.edu>
- *   General rework and split from mm/init.c.
- *
- * Module name: mem_pieces.c
- *
- * Description:
- *	Routines and data structures for manipulating and representing
- *	phyiscal memory extents (i.e. address/length pairs).
- *
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <asm/page.h>
-
-#include "mem_pieces.h"
-
-extern struct mem_pieces phys_avail;
-
-static void mem_pieces_print(struct mem_pieces *);
-
-/*
- * Scan a region for a piece of a given size with the required alignment.
- */
-void __init *
-mem_pieces_find(unsigned int size, unsigned int align)
-{
-	int i;
-	unsigned a, e;
-	struct mem_pieces *mp = &phys_avail;
-
-	for (i = 0; i < mp->n_regions; ++i) {
-		a = mp->regions[i].address;
-		e = a + mp->regions[i].size;
-		a = (a + align - 1) & -align;
-		if (a + size <= e) {
-			mem_pieces_remove(mp, a, size, 1);
-			return (void *) __va(a);
-		}
-	}
-	panic("Couldn't find %u bytes at %u alignment\n", size, align);
-
-	return NULL;
-}
-
-/*
- * Remove some memory from an array of pieces
- */
-void __init
-mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
-		  int must_exist)
-{
-	int i, j;
-	unsigned int end, rs, re;
-	struct reg_property *rp;
-
-	end = start + size;
-	for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
-		if (end > rp->address && start < rp->address + rp->size)
-			break;
-	}
-	if (i >= mp->n_regions) {
-		if (must_exist)
-			printk("mem_pieces_remove: [%x,%x) not in any region\n",
-			       start, end);
-		return;
-	}
-	for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
-		rs = rp->address;
-		re = rs + rp->size;
-		if (must_exist && (start < rs || end > re)) {
-			printk("mem_pieces_remove: bad overlap [%x,%x) with",
-			       start, end);
-			mem_pieces_print(mp);
-			must_exist = 0;
-		}
-		if (start > rs) {
-			rp->size = start - rs;
-			if (end < re) {
-				/* need to split this entry */
-				if (mp->n_regions >= MEM_PIECES_MAX)
-					panic("eek... mem_pieces overflow");
-				for (j = mp->n_regions; j > i + 1; --j)
-					mp->regions[j] = mp->regions[j-1];
-				++mp->n_regions;
-				rp[1].address = end;
-				rp[1].size = re - end;
-			}
-		} else {
-			if (end < re) {
-				rp->address = end;
-				rp->size = re - end;
-			} else {
-				/* need to delete this entry */
-				for (j = i; j < mp->n_regions - 1; ++j)
-					mp->regions[j] = mp->regions[j+1];
-				--mp->n_regions;
-				--i;
-				--rp;
-			}
-		}
-	}
-}
-
-static void __init
-mem_pieces_print(struct mem_pieces *mp)
-{
-	int i;
-
-	for (i = 0; i < mp->n_regions; ++i)
-		printk(" [%x, %x)", mp->regions[i].address,
-		       mp->regions[i].address + mp->regions[i].size);
-	printk("\n");
-}
-
-void __init
-mem_pieces_sort(struct mem_pieces *mp)
-{
-	unsigned long a, s;
-	int i, j;
-
-	for (i = 1; i < mp->n_regions; ++i) {
-		a = mp->regions[i].address;
-		s = mp->regions[i].size;
-		for (j = i - 1; j >= 0; --j) {
-			if (a >= mp->regions[j].address)
-				break;
-			mp->regions[j+1] = mp->regions[j];
-		}
-		mp->regions[j+1].address = a;
-		mp->regions[j+1].size = s;
-	}
-}
-
-void __init
-mem_pieces_coalesce(struct mem_pieces *mp)
-{
-	unsigned long a, s, ns;
-	int i, j, d;
-
-	d = 0;
-	for (i = 0; i < mp->n_regions; i = j) {
-		a = mp->regions[i].address;
-		s = mp->regions[i].size;
-		for (j = i + 1; j < mp->n_regions
-			     && mp->regions[j].address - a <= s; ++j) {
-			ns = mp->regions[j].address + mp->regions[j].size - a;
-			if (ns > s)
-				s = ns;
-		}
-		mp->regions[d].address = a;
-		mp->regions[d].size = s;
-		++d;
-	}
-	mp->n_regions = d;
-}
diff --git a/arch/powerpc/mm/mem_pieces.h b/arch/powerpc/mm/mem_pieces.h
deleted file mode 100644
index e2b700dc7f18..000000000000
--- a/arch/powerpc/mm/mem_pieces.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
- *   Changes to accommodate Power Macintoshes.
- * Cort Dougan <cort@cs.nmt.edu>
- *   Rewrites.
- * Grant Erickson <grant@lcse.umn.edu>
- *   General rework and split from mm/init.c.
- *
- * Module name: mem_pieces.h
- *
- * Description:
- *	Routines and data structures for manipulating and representing
- *	phyiscal memory extents (i.e. address/length pairs).
- *
- */
-
-#ifndef __MEM_PIECES_H__
-#define __MEM_PIECES_H__
-
-#include <asm/prom.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* Type Definitions */
-
-#define	MEM_PIECES_MAX	32
-
-struct mem_pieces {
-    int n_regions;
-    struct reg_property regions[MEM_PIECES_MAX];
-};
-
-/* Function Prototypes */
-
-extern void	*mem_pieces_find(unsigned int size, unsigned int align);
-extern void	mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
-				  unsigned int size, int must_exist);
-extern void	mem_pieces_coalesce(struct mem_pieces *mp);
-extern void	mem_pieces_sort(struct mem_pieces *mp);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __MEM_PIECES_H__ */
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 540f3292b229..06fe8af3af55 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -36,6 +36,8 @@ extern unsigned long ioremap_base;
 extern unsigned long ioremap_bot;
 extern unsigned int rtas_data, rtas_size;
 
+extern unsigned long __max_low_memory;
+extern unsigned long __initial_memory_limit;
 extern unsigned long total_memory;
 extern unsigned long total_lowmem;
 extern int mem_init_done;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 81a3d7446d37..5792e533916f 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -190,8 +190,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
-	{
+	if (mem_init_done && (p < virt_to_phys(high_memory))) {
 		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
 		       __builtin_return_address(0));
 		return NULL;
diff --git a/arch/powerpc/mm/ppc_mmu.c b/arch/powerpc/mm/ppc_mmu.c
index 9a381ed5eb21..cef9e83cc7e9 100644
--- a/arch/powerpc/mm/ppc_mmu.c
+++ b/arch/powerpc/mm/ppc_mmu.c
@@ -32,9 +32,9 @@
 #include <asm/prom.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
+#include <asm/lmb.h>
 
 #include "mmu_decl.h"
-#include "mem_pieces.h"
 
 PTE *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
@@ -215,17 +215,6 @@ void __init MMU_init_hw(void)
 #define MIN_N_HPTEG	1024		/* min 64kB hash table */
 #endif
 
-#ifdef CONFIG_POWER4
-	/* The hash table has already been allocated and initialized
-	   in prom.c */
-	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
-	lg_n_hpteg = __ilog2(n_hpteg);
-
-	/* Remove the hash table from the available memory */
-	if (Hash)
-		reserve_phys_mem(__pa(Hash), Hash_size);
-
-#else /* CONFIG_POWER4 */
 	/*
 	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
 	 * This is less than the recommended amount, but then
@@ -245,10 +234,10 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = mem_pieces_find(Hash_size, Hash_size);
+	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+				   __initial_memory_limit));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
-#endif /* CONFIG_POWER4 */
 
 	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
 