| author | Kumar Gala <galak@kernel.crashing.org> | 2008-04-15 15:52:21 -0400 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2008-04-16 17:46:12 -0400 |
| commit | 99c62dd773797b68f3b1ca6bb3274725d1852fa2 (patch) | |
| tree | 7641433c9a3358c5dc108f2d3d4894f5f96f0b19 /arch/powerpc/mm | |
| parent | 1993cbf4ae7d30f9a417e143c1344466f2e2ae2e (diff) | |
[POWERPC] Remove and replace uses of PPC_MEMSTART with memstart_addr

A number of users of PPC_MEMSTART (40x, ppc_mmu_32) can just always
use 0 as we don't support booting these kernels at non-zero physical
addresses since their exception vectors must be at 0 (or 0xfffx_xxxx).

For the sub-arches that support relocatable interrupt vectors
(book-e), it's reasonable to have memory start at a non-zero physical
address. For those cases use the variable memstart_addr instead of
the #define PPC_MEMSTART since the only uses of PPC_MEMSTART are for
initialization and in the future we can set memstart_addr at runtime
to have a relocatable kernel.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/40x_mmu.c       |  2 +-
 arch/powerpc/mm/fsl_booke_mmu.c | 11 +++++------
 arch/powerpc/mm/init_32.c       |  7 +++----
 arch/powerpc/mm/mmu_decl.h      |  1 +
 arch/powerpc/mm/pgtable_32.c    |  5 +++--
 arch/powerpc/mm/ppc_mmu_32.c    | 11 ++---------
 6 files changed, 15 insertions(+), 22 deletions(-)
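
Taken together, the hunks below replace a build-time constant with a boot-time variable. The following standalone C sketch (illustration only; the scaffolding, helper, and constants are invented and are not part of this commit) shows the pattern the commit message describes: a fixed PPC_MEMSTART versus a memstart_addr that boot code could fill in at runtime, which is what later makes a relocatable kernel possible.

```c
/*
 * Standalone sketch, not kernel code: contrast a compile-time memory
 * start (the old PPC_MEMSTART #define) with a runtime variable
 * (memstart_addr) that boot code could set, e.g. from the device tree.
 * The helper below and its constants are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PPC_MEMSTART	0x00000000	/* old: fixed at build time */

static phys_addr_t memstart_addr;	/* new: discovered at boot */

/* Linear-map translation: phys = memstart_addr + (virt - PAGE_OFFSET). */
static phys_addr_t linear_virt_to_phys(uintptr_t va, uintptr_t page_offset)
{
	return memstart_addr + (va - page_offset);
}

int main(void)
{
	const uintptr_t PAGE_OFFSET = 0xc0000000u;

	memstart_addr = 0x20000000;	/* pretend RAM starts at 512 MB */
	printf("old fixed start: 0x%x\n", PPC_MEMSTART);
	printf("phys of 0xc0001000: 0x%llx\n",
	       (unsigned long long)linear_virt_to_phys(0xc0001000u, PAGE_OFFSET));
	return 0;
}
```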
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 3899ea97fbdf..cecbbc76f624 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -97,7 +97,7 @@ unsigned long __init mmu_mapin_ram(void)
 	phys_addr_t p;
 
 	v = KERNELBASE;
-	p = PPC_MEMSTART;
+	p = 0;
 	s = total_lowmem;
 
 	if (__map_without_ltlbs)
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index c93a966b7e4b..3dd0c8189bae 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -53,13 +53,12 @@
 #include <asm/machdep.h>
 #include <asm/setup.h>
 
+#include "mmu_decl.h"
+
 extern void loadcam_entry(unsigned int index);
 unsigned int tlbcam_index;
 unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
-extern unsigned long total_lowmem;
-extern unsigned long __max_low_memory;
-extern unsigned long __initial_memory_limit;
 #define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
 
 #define NUM_TLBCAMS	(16)
@@ -165,15 +164,15 @@ void invalidate_tlbcam_entry(int index)
 void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
 		unsigned long cam2)
 {
-	settlbcam(0, PAGE_OFFSET, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+	settlbcam(0, PAGE_OFFSET, memstart_addr, cam0, _PAGE_KERNEL, 0);
 	tlbcam_index++;
 	if (cam1) {
 		tlbcam_index++;
-		settlbcam(1, PAGE_OFFSET+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+		settlbcam(1, PAGE_OFFSET+cam0, memstart_addr+cam0, cam1, _PAGE_KERNEL, 0);
 	}
 	if (cam2) {
 		tlbcam_index++;
-		settlbcam(2, PAGE_OFFSET+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+		settlbcam(2, PAGE_OFFSET+cam0+cam1, memstart_addr+cam0+cam1, cam2, _PAGE_KERNEL, 0);
 	}
 }
 
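The cam_mapin_ram() hunk above only has to offset every physical base by memstart_addr. As a quick aid, here is a standalone sketch (invented values, not kernel code) of how the three CAM windows tile the virtual and physical ranges:

```c
/*
 * Standalone sketch of how cam_mapin_ram() stacks its windows: entry i
 * starts where entry i-1 ended, on both the virtual (PAGE_OFFSET + ...)
 * and physical (memstart_addr + ...) sides.  All values are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset   = 0xc0000000UL;
	unsigned long memstart_addr = 0x00000000UL;	/* may be non-zero on Book-E */
	unsigned long cam[3] = { 0x10000000UL, 0x10000000UL, 0x04000000UL };
	unsigned long off = 0;

	for (int i = 0; i < 3; i++) {
		printf("CAM%d: virt 0x%08lx -> phys 0x%08lx, size 0x%08lx\n",
		       i, page_offset + off, memstart_addr + off, cam[i]);
		off += cam[i];
	}
	return 0;
}
```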
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 0c66a9fe63f5..1d7e5b8ade6a 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -59,8 +59,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-unsigned long ppc_memstart;
-unsigned long ppc_memoffset = PAGE_OFFSET;
+phys_addr_t memstart_addr;
+phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
@@ -145,8 +145,7 @@ void __init MMU_init(void)
 		printk(KERN_WARNING "Only using first contiguous memory region");
 	}
 
-	total_memory = lmb_end_of_DRAM();
-	total_lowmem = total_memory;
+	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
 
 #ifdef CONFIG_FSL_BOOKE
 	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index ebfd13dc9d19..5bc11f5933a9 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -51,6 +51,7 @@ extern unsigned long __max_low_memory;
 extern unsigned long __initial_memory_limit;
 extern unsigned long total_memory;
 extern unsigned long total_lowmem;
+extern phys_addr_t memstart_addr;
 
 /* ...and now those things that may be slightly different between processor
  * architectures. -- Dan
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index ac3390f81900..64c44bcc68de 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -281,12 +281,13 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
  */
 void __init mapin_ram(void)
 {
-	unsigned long v, p, s, f;
+	unsigned long v, s, f;
+	phys_addr_t p;
 	int ktext;
 
 	s = mmu_mapin_ram();
 	v = KERNELBASE + s;
-	p = PPC_MEMSTART + s;
+	p = memstart_addr + s;
 	for (; s < total_lowmem; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
 		f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 72de3c79210a..65f915cbd29c 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -82,7 +82,6 @@ unsigned long __init mmu_mapin_ram(void)
 #else
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
-	unsigned long align;
 
 	if (__map_without_bats) {
 		printk(KERN_DEBUG "RAM mapped without BATs\n");
@@ -93,19 +92,13 @@ unsigned long __init mmu_mapin_ram(void)
 
 	/* Make sure we don't map a block larger than the
 	   smallest alignment of the physical address. */
-	/* alignment of PPC_MEMSTART */
-	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
-	/* set BAT block size to MIN(max_size, align) */
-	if (align && align < max_size)
-		max_size = align;
-
 	tot = total_lowmem;
 	for (bl = 128<<10; bl < max_size; bl <<= 1) {
 		if (bl * 2 > tot)
 			break;
 	}
 
-	setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
+	setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
 	done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
 	if ((done < tot) && !bat_addrs[3].limit) {
 		/* use BAT3 to cover a bit more */
@@ -113,7 +106,7 @@ unsigned long __init mmu_mapin_ram(void)
 		for (bl = 128<<10; bl < max_size; bl <<= 1)
 			if (bl * 2 > tot)
 				break;
-		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
+		setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
 		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
 	}
 
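One deleted chunk in ppc_mmu_32.c deserves a note: align = ~(PPC_MEMSTART-1) & PPC_MEMSTART isolates the lowest set bit of the start address, i.e. its largest power-of-two alignment, so a BAT block was never sized beyond what the physical base could support. With the start hard-wired to 0 on these classic 32-bit parts the clamp is moot. A standalone sketch of the bit trick (illustration only, not kernel code):

```c
/*
 * Sketch of the alignment trick from the deleted lines:
 * ~(x - 1) & x keeps only the lowest set bit of x, which is the
 * largest power of two that divides x.  Not kernel code.
 */
#include <stdio.h>

static unsigned long alignment_of(unsigned long x)
{
	return ~(x - 1UL) & x;		/* same as x & -x */
}

int main(void)
{
	printf("0x%lx\n", alignment_of(0x18000000UL));	/* 0x08000000: 128 MB aligned */
	printf("0x%lx\n", alignment_of(0x10000000UL));	/* 0x10000000: 256 MB aligned */
	return 0;
}
```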