author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2005-04-16 18:24:33 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>           2005-04-16 18:24:33 -0400
commit     dfbacdc1a0b568dfa69fb2c1b39b608074001083 (patch)
tree       f63fdb5db967253e46472ff776d1e22c38cee16e /arch/ppc64/mm
parent     7bbd827750e630003896c96d0212962276ee5d91 (diff)
[PATCH] ppc64: Fix semantics of __ioremap
This patch fixes ppc64 __ioremap() so that it stops implicitly adding
_PAGE_GUARDED when the cache is not writeback, and instead lets the callers
provide the flags they want. This allows things like framebuffers to
explicitly request a non-cacheable and non-guarded mapping, which is more
efficient for that type of memory without side effects. The patch also
fixes all current callers to add _PAGE_GUARDED, except btext, which is fine
without it.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
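
For context, here is a minimal, hypothetical sketch (not part of the patch) of how a caller might use the new convention: ordinary device registers keep using ioremap(), which after this change supplies _PAGE_NO_CACHE | _PAGE_GUARDED itself, while a framebuffer driver can call __ioremap() with only _PAGE_NO_CACHE to get the non-guarded mapping described above. The names example_map, regs_phys, fb_phys and their sizes are invented for illustration.

    /*
     * Hypothetical illustration only -- not taken from the patch.
     * Assumes a driver with a register block at regs_phys and a
     * framebuffer aperture at fb_phys (invented names).
     */
    #include <linux/errno.h>
    #include <asm/io.h>          /* ioremap, __ioremap, iounmap */
    #include <asm/pgtable.h>     /* _PAGE_NO_CACHE */

    static void __iomem *regs;
    static void __iomem *fb;

    static int example_map(unsigned long regs_phys, unsigned long regs_size,
                           unsigned long fb_phys, unsigned long fb_size)
    {
            /* Device registers: ioremap() now adds _PAGE_GUARDED itself,
             * so ordinary callers need no change. */
            regs = ioremap(regs_phys, regs_size);
            if (!regs)
                    return -ENOMEM;

            /* Framebuffer: explicitly request a non-cacheable but
             * non-guarded mapping, which this patch makes possible. */
            fb = __ioremap(fb_phys, fb_size, _PAGE_NO_CACHE);
            if (!fb) {
                    iounmap(regs);
                    return -ENOMEM;
            }
            return 0;
    }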
Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/init.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 23813d03e1c4..a7149b9fc35c 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -155,7 +155,8 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
 
 		pa = abs_to_phys(pa);
-		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+							  __pgprot(flags)));
 		spin_unlock(&ioremap_mm.page_table_lock);
 	} else {
 		unsigned long va, vpn, hash, hpteg;
@@ -191,12 +192,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
-	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
-		flags |= _PAGE_GUARDED;
 
-	for (i = 0; i < size; i += PAGE_SIZE) {
+	for (i = 0; i < size; i += PAGE_SIZE)
 		map_io_page(ea+i, pa+i, flags);
-	}
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
@@ -205,7 +203,7 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 void __iomem *
 ioremap(unsigned long addr, unsigned long size)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
+	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
 void __iomem *
@@ -272,7 +270,8 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 			return 1;
 		}
 		if (ea != (unsigned long) area->addr) {
-			printk(KERN_ERR "unexpected addr return from im_get_area\n");
+			printk(KERN_ERR "unexpected addr return from "
+			       "im_get_area\n");
 			return 1;
 		}
 	}
@@ -315,7 +314,8 @@ static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
 			continue;
 		if (pte_present(page))
 			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
+		       " table\n");
 	} while (address < end);
 }
 
@@ -352,7 +352,7 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  * Access to IO memory should be serialized by driver.
  * This code is modeled after vmalloc code - unmap_vm_area()
  *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
+ * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void iounmap(volatile void __iomem *token)
 {