about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/init.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2005-10-05 22:23:33 -0400
committerPaul Mackerras <paulus@samba.org>2005-10-05 22:23:33 -0400
commit7c8c6b9776fb41134d87ef50706a777a45d61cd4 (patch)
tree5f3cc71e34bc244d53364e103a9746bfe92da9ae /arch/powerpc/mm/init.c
parent9b6b563c0d2d25ecc3111916031aa7255543fbfb (diff)
powerpc: Merge lmb.c and make MM initialization use it.
This also creates merged versions of do_init_bootmem, paging_init and mem_init and moves them to arch/powerpc/mm/mem.c. It gets rid of the mem_pieces stuff. I made memory_limit a parameter to lmb_enforce_memory_limit rather than a global referenced by that function. This will require some small changes to ppc64 if we want to continue building ARCH=ppc64 using the merged lmb.c. Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/init.c')
-rw-r--r--arch/powerpc/mm/init.c365
1 file changed, 21 insertions, 344 deletions
diff --git a/arch/powerpc/mm/init.c b/arch/powerpc/mm/init.c
index 3a81ef15c67e..bf13c14e66b3 100644
--- a/arch/powerpc/mm/init.c
+++ b/arch/powerpc/mm/init.c
@@ -45,8 +45,9 @@
45#include <asm/tlb.h> 45#include <asm/tlb.h>
46#include <asm/bootinfo.h> 46#include <asm/bootinfo.h>
47#include <asm/prom.h> 47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/sections.h>
48 50
49#include "mem_pieces.h"
50#include "mmu_decl.h" 51#include "mmu_decl.h"
51 52
52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) 53#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
@@ -65,17 +66,11 @@ unsigned long total_lowmem;
65unsigned long ppc_memstart; 66unsigned long ppc_memstart;
66unsigned long ppc_memoffset = PAGE_OFFSET; 67unsigned long ppc_memoffset = PAGE_OFFSET;
67 68
68int mem_init_done;
69int init_bootmem_done;
70int boot_mapsize; 69int boot_mapsize;
71#ifdef CONFIG_PPC_PMAC 70#ifdef CONFIG_PPC_PMAC
72unsigned long agp_special_page; 71unsigned long agp_special_page;
73#endif 72#endif
74 73
75extern char _end[];
76extern char etext[], _stext[];
77extern char __init_begin, __init_end;
78
79#ifdef CONFIG_HIGHMEM 74#ifdef CONFIG_HIGHMEM
80pte_t *kmap_pte; 75pte_t *kmap_pte;
81pgprot_t kmap_prot; 76pgprot_t kmap_prot;
@@ -85,15 +80,15 @@ EXPORT_SYMBOL(kmap_pte);
85#endif 80#endif
86 81
87void MMU_init(void); 82void MMU_init(void);
88void set_phys_avail(unsigned long total_ram);
89 83
90/* XXX should be in current.h -- paulus */ 84/* XXX should be in current.h -- paulus */
91extern struct task_struct *current_set[NR_CPUS]; 85extern struct task_struct *current_set[NR_CPUS];
92 86
93char *klimit = _end; 87char *klimit = _end;
94struct mem_pieces phys_avail;
95struct device_node *memory_node; 88struct device_node *memory_node;
96 89
90extern int init_bootmem_done;
91
97/* 92/*
98 * this tells the system to map all of ram with the segregs 93 * this tells the system to map all of ram with the segregs
99 * (i.e. page tables) instead of the bats. 94 * (i.e. page tables) instead of the bats.
@@ -102,84 +97,14 @@ struct device_node *memory_node;
102int __map_without_bats; 97int __map_without_bats;
103int __map_without_ltlbs; 98int __map_without_ltlbs;
104 99
105/* max amount of RAM to use */
106unsigned long __max_memory;
107/* max amount of low RAM to map in */ 100/* max amount of low RAM to map in */
108unsigned long __max_low_memory = MAX_LOW_MEM; 101unsigned long __max_low_memory = MAX_LOW_MEM;
109 102
110/* 103/*
111 * Read in a property describing some pieces of memory. 104 * limit of what is accessible with initial MMU setup -
105 * 256MB usually, but only 16MB on 601.
112 */ 106 */
113static int __init get_mem_prop(char *name, struct mem_pieces *mp) 107unsigned long __initial_memory_limit = 0x10000000;
114{
115 struct reg_property *rp;
116 int i, s;
117 unsigned int *ip;
118 int nac = prom_n_addr_cells(memory_node);
119 int nsc = prom_n_size_cells(memory_node);
120
121 ip = (unsigned int *) get_property(memory_node, name, &s);
122 if (ip == NULL) {
123 printk(KERN_ERR "error: couldn't get %s property on /memory\n",
124 name);
125 return 0;
126 }
127 s /= (nsc + nac) * 4;
128 rp = mp->regions;
129 for (i = 0; i < s; ++i, ip += nac+nsc) {
130 if (nac >= 2 && ip[nac-2] != 0)
131 continue;
132 rp->address = ip[nac-1];
133 if (nsc >= 2 && ip[nac+nsc-2] != 0)
134 rp->size = ~0U;
135 else
136 rp->size = ip[nac+nsc-1];
137 ++rp;
138 }
139 mp->n_regions = rp - mp->regions;
140
141 /* Make sure the pieces are sorted. */
142 mem_pieces_sort(mp);
143 mem_pieces_coalesce(mp);
144 return 1;
145}
146
147/*
148 * Collect information about physical RAM and which pieces are
149 * already in use from the device tree.
150 */
151unsigned long __init find_end_of_memory(void)
152{
153 unsigned long a, total;
154 struct mem_pieces phys_mem;
155
156 /*
157 * Find out where physical memory is, and check that it
158 * starts at 0 and is contiguous. It seems that RAM is
159 * always physically contiguous on Power Macintoshes.
160 *
161 * Supporting discontiguous physical memory isn't hard,
162 * it just makes the virtual <-> physical mapping functions
163 * more complicated (or else you end up wasting space
164 * in mem_map).
165 */
166 memory_node = find_devices("memory");
167 if (memory_node == NULL || !get_mem_prop("reg", &phys_mem)
168 || phys_mem.n_regions == 0)
169 panic("No RAM??");
170 a = phys_mem.regions[0].address;
171 if (a != 0)
172 panic("RAM doesn't start at physical address 0");
173 total = phys_mem.regions[0].size;
174
175 if (phys_mem.n_regions > 1) {
176 printk("RAM starting at 0x%x is not contiguous\n",
177 phys_mem.regions[1].address);
178 printk("Using RAM from 0 to 0x%lx\n", total-1);
179 }
180
181 return total;
182}
183 108
184/* 109/*
185 * Check for command-line options that affect what MMU_init will do. 110 * Check for command-line options that affect what MMU_init will do.
@@ -194,27 +119,6 @@ void MMU_setup(void)
194 if (strstr(cmd_line, "noltlbs")) { 119 if (strstr(cmd_line, "noltlbs")) {
195 __map_without_ltlbs = 1; 120 __map_without_ltlbs = 1;
196 } 121 }
197
198 /* Look for mem= option on command line */
199 if (strstr(cmd_line, "mem=")) {
200 char *p, *q;
201 unsigned long maxmem = 0;
202
203 for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
204 q = p + 4;
205 if (p > cmd_line && p[-1] != ' ')
206 continue;
207 maxmem = simple_strtoul(q, &q, 0);
208 if (*q == 'k' || *q == 'K') {
209 maxmem <<= 10;
210 ++q;
211 } else if (*q == 'm' || *q == 'M') {
212 maxmem <<= 20;
213 ++q;
214 }
215 }
216 __max_memory = maxmem;
217 }
218} 122}
219 123
220/* 124/*
@@ -227,23 +131,22 @@ void __init MMU_init(void)
227 if (ppc_md.progress) 131 if (ppc_md.progress)
228 ppc_md.progress("MMU:enter", 0x111); 132 ppc_md.progress("MMU:enter", 0x111);
229 133
134 /* 601 can only access 16MB at the moment */
135 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
136 __initial_memory_limit = 0x01000000;
137
230 /* parse args from command line */ 138 /* parse args from command line */
231 MMU_setup(); 139 MMU_setup();
232 140
233 /* 141 if (lmb.memory.cnt > 1) {
234 * Figure out how much memory we have, how much 142 lmb.memory.cnt = 1;
235 * is lowmem, and how much is highmem. If we were 143 lmb_analyze();
236 * passed the total memory size from the bootloader, 144 printk(KERN_WARNING "Only using first contiguous memory region");
237 * just use it. 145 }
238 */ 146
239 if (boot_mem_size) 147 total_memory = lmb_end_of_DRAM();
240 total_memory = boot_mem_size;
241 else
242 total_memory = find_end_of_memory();
243
244 if (__max_memory && total_memory > __max_memory)
245 total_memory = __max_memory;
246 total_lowmem = total_memory; 148 total_lowmem = total_memory;
149
247#ifdef CONFIG_FSL_BOOKE 150#ifdef CONFIG_FSL_BOOKE
248 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB 151 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
249 * entries, so we need to adjust lowmem to match the amount we can map 152 * entries, so we need to adjust lowmem to match the amount we can map
@@ -256,7 +159,6 @@ void __init MMU_init(void)
256 total_memory = total_lowmem; 159 total_memory = total_lowmem;
257#endif /* CONFIG_HIGHMEM */ 160#endif /* CONFIG_HIGHMEM */
258 } 161 }
259 set_phys_avail(total_lowmem);
260 162
261 /* Initialize the MMU hardware */ 163 /* Initialize the MMU hardware */
262 if (ppc_md.progress) 164 if (ppc_md.progress)
@@ -303,7 +205,8 @@ void __init *early_get_page(void)
303 if (init_bootmem_done) { 205 if (init_bootmem_done) {
304 p = alloc_bootmem_pages(PAGE_SIZE); 206 p = alloc_bootmem_pages(PAGE_SIZE);
305 } else { 207 } else {
306 p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE); 208 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
209 __initial_memory_limit));
307 } 210 }
308 return p; 211 return p;
309} 212}
@@ -353,229 +256,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
353 } 256 }
354} 257}
355#endif 258#endif
356
357/*
358 * Initialize the bootmem system and give it all the memory we
359 * have available.
360 */
361void __init do_init_bootmem(void)
362{
363 unsigned long start, size;
364 int i;
365
366 /*
367 * Find an area to use for the bootmem bitmap.
368 * We look for the first area which is at least
369 * 128kB in length (128kB is enough for a bitmap
370 * for 4GB of memory, using 4kB pages), plus 1 page
371 * (in case the address isn't page-aligned).
372 */
373 start = 0;
374 size = 0;
375 for (i = 0; i < phys_avail.n_regions; ++i) {
376 unsigned long a = phys_avail.regions[i].address;
377 unsigned long s = phys_avail.regions[i].size;
378 if (s <= size)
379 continue;
380 start = a;
381 size = s;
382 if (s >= 33 * PAGE_SIZE)
383 break;
384 }
385 start = PAGE_ALIGN(start);
386
387 min_low_pfn = start >> PAGE_SHIFT;
388 max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
389 max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
390 boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
391 PPC_MEMSTART >> PAGE_SHIFT,
392 max_low_pfn);
393
394 /* remove the bootmem bitmap from the available memory */
395 mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
396
397 /* add everything in phys_avail into the bootmem map */
398 for (i = 0; i < phys_avail.n_regions; ++i)
399 free_bootmem(phys_avail.regions[i].address,
400 phys_avail.regions[i].size);
401
402 init_bootmem_done = 1;
403}
404
405/*
406 * paging_init() sets up the page tables - in fact we've already done this.
407 */
408void __init paging_init(void)
409{
410 unsigned long zones_size[MAX_NR_ZONES], i;
411
412#ifdef CONFIG_HIGHMEM
413 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
414 pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
415 (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
416 map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
417 kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
418 (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
419 kmap_prot = PAGE_KERNEL;
420#endif /* CONFIG_HIGHMEM */
421
422 /*
423 * All pages are DMA-able so we put them all in the DMA zone.
424 */
425 zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
426 for (i = 1; i < MAX_NR_ZONES; i++)
427 zones_size[i] = 0;
428
429#ifdef CONFIG_HIGHMEM
430 zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
431#endif /* CONFIG_HIGHMEM */
432
433 free_area_init(zones_size);
434}
435
436void __init mem_init(void)
437{
438 unsigned long addr;
439 int codepages = 0;
440 int datapages = 0;
441 int initpages = 0;
442#ifdef CONFIG_HIGHMEM
443 unsigned long highmem_mapnr;
444
445 highmem_mapnr = total_lowmem >> PAGE_SHIFT;
446#endif /* CONFIG_HIGHMEM */
447 max_mapnr = total_memory >> PAGE_SHIFT;
448
449 high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
450 num_physpages = max_mapnr; /* RAM is assumed contiguous */
451
452 totalram_pages += free_all_bootmem();
453
454#ifdef CONFIG_BLK_DEV_INITRD
455 /* if we are booted from BootX with an initial ramdisk,
456 make sure the ramdisk pages aren't reserved. */
457 if (initrd_start) {
458 for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
459 ClearPageReserved(virt_to_page(addr));
460 }
461#endif /* CONFIG_BLK_DEV_INITRD */
462
463#ifdef CONFIG_PPC_OF
464 /* mark the RTAS pages as reserved */
465 if ( rtas_data )
466 for (addr = (ulong)__va(rtas_data);
467 addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
468 addr += PAGE_SIZE)
469 SetPageReserved(virt_to_page(addr));
470#endif
471#ifdef CONFIG_PPC_PMAC
472 if (agp_special_page)
473 SetPageReserved(virt_to_page(agp_special_page));
474#endif
475 for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
476 addr += PAGE_SIZE) {
477 if (!PageReserved(virt_to_page(addr)))
478 continue;
479 if (addr < (ulong) etext)
480 codepages++;
481 else if (addr >= (unsigned long)&__init_begin
482 && addr < (unsigned long)&__init_end)
483 initpages++;
484 else if (addr < (ulong) klimit)
485 datapages++;
486 }
487
488#ifdef CONFIG_HIGHMEM
489 {
490 unsigned long pfn;
491
492 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
493 struct page *page = mem_map + pfn;
494
495 ClearPageReserved(page);
496 set_page_count(page, 1);
497 __free_page(page);
498 totalhigh_pages++;
499 }
500 totalram_pages += totalhigh_pages;
501 }
502#endif /* CONFIG_HIGHMEM */
503
504 printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
505 (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
506 codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
507 initpages<< (PAGE_SHIFT-10),
508 (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
509
510#ifdef CONFIG_PPC_PMAC
511 if (agp_special_page)
512 printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
513#endif
514
515 mem_init_done = 1;
516}
517
518/*
519 * Set phys_avail to the amount of physical memory,
520 * less the kernel text/data/bss.
521 */
522void __init
523set_phys_avail(unsigned long total_memory)
524{
525 unsigned long kstart, ksize;
526
527 /*
528 * Initially, available physical memory is equivalent to all
529 * physical memory.
530 */
531
532 phys_avail.regions[0].address = PPC_MEMSTART;
533 phys_avail.regions[0].size = total_memory;
534 phys_avail.n_regions = 1;
535
536 /*
537 * Map out the kernel text/data/bss from the available physical
538 * memory.
539 */
540
541 kstart = __pa(_stext); /* should be 0 */
542 ksize = PAGE_ALIGN(klimit - _stext);
543
544 mem_pieces_remove(&phys_avail, kstart, ksize, 0);
545 mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
546
547#if defined(CONFIG_BLK_DEV_INITRD)
548 /* Remove the init RAM disk from the available memory. */
549 if (initrd_start) {
550 mem_pieces_remove(&phys_avail, __pa(initrd_start),
551 initrd_end - initrd_start, 1);
552 }
553#endif /* CONFIG_BLK_DEV_INITRD */
554#ifdef CONFIG_PPC_OF
555 /* remove the RTAS pages from the available memory */
556 if (rtas_data)
557 mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
558#endif
559#ifdef CONFIG_PPC_PMAC
560 /* Because of some uninorth weirdness, we need a page of
561 * memory as high as possible (it must be outside of the
562 * bus address seen as the AGP aperture). It will be used
563 * by the r128 DRM driver
564 *
565 * FIXME: We need to make sure that page doesn't overlap any of the\
566 * above. This could be done by improving mem_pieces_find to be able
567 * to do a backward search from the end of the list.
568 */
569 if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
570 agp_special_page = (total_memory - PAGE_SIZE);
571 mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
572 agp_special_page = (unsigned long)__va(agp_special_page);
573 }
574#endif /* CONFIG_PPC_PMAC */
575}
576
577/* Mark some memory as reserved by removing it from phys_avail. */
578void __init reserve_phys_mem(unsigned long start, unsigned long size)
579{
580 mem_pieces_remove(&phys_avail, start, size, 1);
581}