author     Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-08-05 02:19:56 -0400
committer  Paul Mackerras <paulus@samba.org>	2008-08-10 20:09:56 -0400
commit     bc033b63bbfeb6c4b4eb0a1d083c650e4a0d2af8 (patch)
tree       40363a556790dc45e45f3f1823c7cca93de70dac /arch/powerpc/mm/init_64.c
parent     8db13a0e1e87ae2741ca1677caa90e9592c4cc43 (diff)
powerpc/mm: Fix attribute confusion with htab_bolt_mapping()
The function htab_bolt_mapping() is used to create permanent mappings in the MMU hash table, for example, in order to create the linear mapping of vmemmap. It's also used by early boot ioremap (before mem_init_done).

However, the way ioremap uses it is incorrect, as it passes the protection flags in the "linux PTE" form while htab_bolt_mapping() expects them in the hash table format. This is made more confusing by the fact that some of those flags are actually in the same position in both cases.

This fixes it all by making htab_bolt_mapping() take normal linux protection flags instead, and using a little helper to convert them to htab flags. Callers can now use the usual PAGE_* definitions safely.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

 arch/powerpc/include/asm/mmu-hash64.h |  2 -
 arch/powerpc/mm/hash_utils_64.c       | 65 ++++++++++++++++++++--------------
 arch/powerpc/mm/init_64.c             |  9 +---
 3 files changed, 44 insertions(+), 32 deletions(-)

Signed-off-by: Paul Mackerras <paulus@samba.org>
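For context, the conversion helper itself lands in arch/powerpc/mm/hash_utils_64.c, which the diff below (limited to init_64.c) does not show. The standalone C sketch that follows only illustrates the pattern the commit message describes: translating Linux PTE protection bits into hash-PTE (HPTE) attribute bits before a bolted entry is created. The helper name and every bit value in it are placeholders for illustration, not the kernel's actual definitions.

/*
 * Illustrative sketch of a "Linux PTE flags -> HPTE flags" conversion
 * helper, in the spirit of what this commit adds to hash_utils_64.c.
 * All names and bit values below are placeholders, NOT the real
 * powerpc definitions.
 */
#include <stdio.h>

/* Placeholder "Linux PTE" protection bits */
#define _PAGE_RW        0x001UL
#define _PAGE_EXEC      0x002UL
#define _PAGE_NO_CACHE  0x004UL
#define _PAGE_GUARDED   0x008UL

/* Placeholder hash-PTE (HPTE) attribute bits */
#define HPTE_R_N        0x010UL  /* no-execute */
#define HPTE_R_I        0x020UL  /* cache-inhibited */
#define HPTE_R_G        0x040UL  /* guarded */
#define HPTE_R_M        0x080UL  /* memory coherence */
#define PP_RWXX         0x000UL  /* kernel read/write PP encoding */
#define PP_RXXX         0x100UL  /* kernel read-only PP encoding */

/* Hypothetical helper: map Linux protection bits to HPTE attribute bits */
static unsigned long convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = HPTE_R_M;	/* always coherent in this sketch */

	if (!(pteflags & _PAGE_EXEC))
		rflags |= HPTE_R_N;		/* not executable */
	if (pteflags & _PAGE_NO_CACHE)
		rflags |= HPTE_R_I;
	if (pteflags & _PAGE_GUARDED)
		rflags |= HPTE_R_G;

	/* Pick a protection (PP) encoding from the writability bit */
	rflags |= (pteflags & _PAGE_RW) ? PP_RWXX : PP_RXXX;

	return rflags;
}

int main(void)
{
	unsigned long page_kernel = _PAGE_RW;	/* stand-in for PAGE_KERNEL */

	printf("linux flags 0x%lx -> htab flags 0x%lx\n",
	       page_kernel, convert_pte_flags(page_kernel));
	return 0;
}

With a helper along these lines, a caller such as vmemmap_populate() can pass PAGE_KERNEL directly, as the hunks below show, instead of hand-building hash-format flags like PP_RWXX.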
Diffstat (limited to 'arch/powerpc/mm/init_64.c')
-rw-r--r--	arch/powerpc/mm/init_64.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 4f7df85129d..036fe2f10c7 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -206,13 +206,10 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
-	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
-	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
-
 	/* Align to the page size of the linear mapping. */
 	start = _ALIGN_DOWN(start, page_size);
 
@@ -230,9 +227,9 @@ int __meminit vmemmap_populate(struct page *start_page,
 		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
 			 start, p, __pa(p));
 
-		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_vmemmap_psize,
-					   mmu_kernel_ssize);
+		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
+					   PAGE_KERNEL, mmu_vmemmap_psize,
+					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 