author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-08-05 02:19:56 -0400
committer Paul Mackerras <paulus@samba.org>    2008-08-10 20:09:56 -0400
commit    bc033b63bbfeb6c4b4eb0a1d083c650e4a0d2af8
tree      40363a556790dc45e45f3f1823c7cca93de70dac /arch/powerpc
parent    8db13a0e1e87ae2741ca1677caa90e9592c4cc43
powerpc/mm: Fix attribute confusion with htab_bolt_mapping()
The function htab_bolt_mapping() is used to create permanent
mappings in the MMU hash table, for example, in order to create
the linear mapping of vmemmap. It's also used by early boot
ioremap (before mem_init_done).

However, the way ioremap uses it is incorrect as it passes it the
protection flags in the "linux PTE" form while htab_bolt_mapping()
expects them in the hash table format. This is made more confusing by
the fact that some of those flags are actually in the same position in
both cases.

This fixes it all by making htab_bolt_mapping() take normal linux
protection flags instead, and use a little helper to convert them to
htab flags. Callers can now use the usual PAGE_* definitions safely.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

 arch/powerpc/include/asm/mmu-hash64.h |    2 -
 arch/powerpc/mm/hash_utils_64.c       |   65 ++++++++++++++++++++--------------
 arch/powerpc/mm/init_64.c             |    9 +---
 3 files changed, 44 insertions(+), 32 deletions(-)

Signed-off-by: Paul Mackerras <paulus@samba.org>
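For a concrete view of the API change, here is the call-site shape before and
after, taken from the create_section_mapping() hunk further down:

	/* before: the caller open-codes hash-table (HPTE) format bits */
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
			mmu_linear_psize, mmu_kernel_ssize));

	/* after: the caller passes ordinary Linux protection flags, which
	 * htab_bolt_mapping() now converts internally */
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 PAGE_KERNEL, mmu_linear_psize,
				 mmu_kernel_ssize));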
Diffstat (limited to 'arch/powerpc')
-rw-r--r--    arch/powerpc/include/asm/mmu-hash64.h    2
-rw-r--r--    arch/powerpc/mm/hash_utils_64.c          63
-rw-r--r--    arch/powerpc/mm/init_64.c                9
3 files changed, 43 insertions, 31 deletions
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 19c7a940349..c2df53c5ceb 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -278,7 +278,7 @@ extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
 			  unsigned long trap);
 
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-			     unsigned long pstart, unsigned long mode,
+			     unsigned long pstart, unsigned long prot,
 			     int psize, int ssize);
 extern void set_huge_psize(int psize);
 extern void add_gpage(unsigned long addr, unsigned long page_size,
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ce5a4dcd00..14be408dfc9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -151,39 +151,53 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
 	},
 };
 
+static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+{
+	unsigned long rflags = pteflags & 0x1fa;
+
+	/* _PAGE_EXEC -> NOEXEC */
+	if ((pteflags & _PAGE_EXEC) == 0)
+		rflags |= HPTE_R_N;
+
+	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
+	 * need to add in 0x1 if it's a read-only user page
+	 */
+	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
+					 (pteflags & _PAGE_DIRTY)))
+		rflags |= 1;
+
+	/* Always add C */
+	return rflags | HPTE_R_C;
+}
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode,
+		      unsigned long pstart, unsigned long prot,
 		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
-	unsigned long tmp_mode;
 	int ret = 0;
 
 	shift = mmu_psize_defs[psize].shift;
 	step = 1 << shift;
 
+	prot = htab_convert_pte_flags(prot);
+
+	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
+	    vstart, vend, pstart, prot, psize, ssize);
+
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 		unsigned long va = hpt_va(vaddr, vsid, ssize);
 
-		tmp_mode = mode;
-
-		/* Make non-kernel text non-executable */
-		if (!in_kernel_text(vaddr))
-			tmp_mode = mode | HPTE_R_N;
-
 		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
-
 		BUG_ON(!ppc_md.hpte_insert);
-		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize, ssize);
+		ret = ppc_md.hpte_insert(hpteg, va, paddr, prot,
+					 HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
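The helper's bit-twiddling is easiest to see in isolation. The sketch below
reproduces htab_convert_pte_flags() as a stand-alone user-space program; the
_PAGE_* and HPTE_R_* values are assumptions based on the ppc64 headers of this
era, not something shown in this diff:

	#include <stdio.h>

	/* assumed Linux PTE bits (asm/pgtable-ppc64.h of the period) */
	#define _PAGE_USER	0x0002
	#define _PAGE_EXEC	0x0004
	#define _PAGE_GUARDED	0x0008
	#define _PAGE_COHERENT	0x0010
	#define _PAGE_NO_CACHE	0x0020
	#define _PAGE_WRITETHRU	0x0040
	#define _PAGE_DIRTY	0x0080
	#define _PAGE_RW	0x0100
	#define _PAGE_ACCESSED	0x0400

	/* assumed HPTE second-doubleword bits (asm/mmu-hash64.h) */
	#define HPTE_R_N	0x0004	/* no execute */
	#define HPTE_R_C	0x0080	/* changed */

	static unsigned long htab_convert_pte_flags(unsigned long pteflags)
	{
		/* 0x1fa keeps the Linux bits that already sit where the
		 * HPTE expects them: USER (PP 0x2), G, M, I, W, plus the
		 * DIRTY/RW positions */
		unsigned long rflags = pteflags & 0x1fa;

		if ((pteflags & _PAGE_EXEC) == 0)
			rflags |= HPTE_R_N;	/* no _PAGE_EXEC -> no-execute */

		/* read-only user page: bump PP from 0x2 to 0x3 */
		if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
						 (pteflags & _PAGE_DIRTY)))
			rflags |= 1;

		return rflags | HPTE_R_C;	/* always pre-set C (changed) */
	}

	int main(void)
	{
		/* a PAGE_KERNEL-like mapping: accessed, dirty, RW, coherent */
		unsigned long kernel_rw = _PAGE_ACCESSED | _PAGE_DIRTY |
					  _PAGE_RW | _PAGE_COHERENT;
		/* PP stays 0 (kernel RW); N is set since _PAGE_EXEC is clear */
		printf("kernel RW -> %#lx\n", htab_convert_pte_flags(kernel_rw));
		return 0;
	}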
@@ -519,9 +533,9 @@ static unsigned long __init htab_get_table_size(void)
 #ifdef CONFIG_MEMORY_HOTPLUG
 void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-			mmu_linear_psize, mmu_kernel_ssize));
+				 PAGE_KERNEL, mmu_linear_psize,
+				 mmu_kernel_ssize));
 }
 
 int remove_section_mapping(unsigned long start, unsigned long end)
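Why PAGE_KERNEL is a safe drop-in for the old open-coded value: under the
ppc64 definitions of this period (assumed here for illustration, not part of
this diff), PAGE_KERNEL carries the same Linux bits the caller used to
translate by hand:

	/* assumed contemporary definitions, for illustration only */
	#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
	#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
	#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)

htab_convert_pte_flags() then maps ACCESSED/DIRTY/RW/COHERENT onto the HPTE
C, PP and coherence bits that the removed
"_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX" expression encoded
directly.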
@@ -570,7 +584,7 @@ void __init htab_initialize(void)
 {
 	unsigned long table;
 	unsigned long pteg_count;
-	unsigned long mode_rw;
+	unsigned long prot, tprot;
 	unsigned long base = 0, size = 0, limit;
 	int i;
 
@@ -628,7 +642,7 @@ void __init htab_initialize(void)
 		mtspr(SPRN_SDR1, _SDR1);
 	}
 
-	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+	prot = PAGE_KERNEL;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -646,8 +660,10 @@ void __init htab_initialize(void)
 	for (i=0; i < lmb.memory.cnt; i++) {
 		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
+		tprot = prot | (in_kernel_text(base) ? _PAGE_EXEC : 0);
 
-		DBG("creating mapping for region: %lx : %lx\n", base, size);
+		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
+		    base, size, tprot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
@@ -664,21 +680,21 @@ void __init htab_initialize(void)
 			unsigned long dart_table_end = dart_tablebase + 16 * MB;
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-							__pa(base), mode_rw,
+							 __pa(base), tprot,
 							 mmu_linear_psize,
 							 mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
-							 mode_rw,
+							 tprot,
 							 mmu_linear_psize,
 							 mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
+				tprot, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -696,7 +712,7 @@ void __init htab_initialize(void)
 		tce_alloc_start = base + size + 1;
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
-					 __pa(tce_alloc_start), mode_rw,
+					 __pa(tce_alloc_start), prot,
 					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
@@ -1117,8 +1133,7 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	unsigned long hash, hpteg;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
 	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
-	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
-		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
 	int ret;
 
 	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 4f7df85129d..036fe2f10c7 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -206,13 +206,10 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
-	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
-	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
-
 	/* Align to the page size of the linear mapping. */
 	start = _ALIGN_DOWN(start, page_size);
 
@@ -230,9 +227,9 @@ int __meminit vmemmap_populate(struct page *start_page,
 		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
 			 start, p, __pa(p));
 
-		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_vmemmap_psize,
+		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
+					   PAGE_KERNEL, mmu_vmemmap_psize,
 					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 