author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-04-16 18:24:33 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>          2005-04-16 18:24:33 -0400
commit     dfbacdc1a0b568dfa69fb2c1b39b608074001083 (patch)
tree       f63fdb5db967253e46472ff776d1e22c38cee16e /arch/ppc64
parent     7bbd827750e630003896c96d0212962276ee5d91 (diff)
[PATCH] ppc64: Fix semantics of __ioremap
This patch fixes ppc64 __ioremap() so that it stops implicitly adding _PAGE_GUARDED when the cache is not writeback, and instead lets the callers provide the flags they want. This allows things like framebuffers to explicitly request a non-cacheable and non-guarded mapping, which is more efficient for that type of memory, without side effects. The patch also fixes all current callers to add _PAGE_GUARDED, except btext, which is fine without it.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
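As an illustrative note on the new calling convention (a sketch, not part of the patch): a driver mapping device registers can keep using ioremap(), which now passes _PAGE_NO_CACHE | _PAGE_GUARDED itself, while a framebuffer driver that wants an uncached but non-guarded mapping calls __ioremap() with only _PAGE_NO_CACHE. The function and parameter names below (map_device_regs, map_framebuffer, regs_phys, fb_phys, fb_size) are placeholders, not identifiers from the tree.

/*
 * Illustrative sketch only, not taken from the kernel tree: how callers
 * choose flags under the new __ioremap() semantics.
 */
#include <asm/io.h>
#include <asm/pgtable.h>

/* Device registers: uncached and guarded.  Plain ioremap() is enough,
 * since it now maps with _PAGE_NO_CACHE | _PAGE_GUARDED itself. */
static void __iomem *map_device_regs(unsigned long regs_phys, unsigned long size)
{
	return ioremap(regs_phys, size);
}

/* Framebuffer memory: uncached but not guarded, a combination that
 * __ioremap() no longer forces on the caller. */
static void __iomem *map_framebuffer(unsigned long fb_phys, unsigned long fb_size)
{
	return __ioremap(fb_phys, fb_size, _PAGE_NO_CACHE);
}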
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/maple_setup.c     2
-rw-r--r--  arch/ppc64/kernel/pSeries_setup.c   2
-rw-r--r--  arch/ppc64/kernel/pci.c            12
-rw-r--r--  arch/ppc64/mm/init.c               18
4 files changed, 18 insertions, 16 deletions
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index 1db6ea0f336f..8cf95a27178e 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -142,7 +142,7 @@ static void __init maple_init_early(void)
 	if (physport) {
 		void *comport;
 		/* Map the uart for udbg. */
-		comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+		comport = (void *)ioremap(physport, 16);
 		udbg_init_uart(comport, default_speed);
 
 		ppc_md.udbg_putc = udbg_putc;
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 06536de51257..6c0d1d58a552 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -363,7 +363,7 @@ static void __init pSeries_init_early(void)
 		find_udbg_vterm();
 	else if (physport) {
 		/* Map the uart for udbg. */
-		comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+		comport = (void *)ioremap(physport, 16);
 		udbg_init_uart(comport, default_speed);
 
 		ppc_md.udbg_putc = udbg_putc;
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index fdd8f7869a68..be3cc387c1ec 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -547,8 +547,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
 	if (range == NULL || (rlen < sizeof(struct isa_range))) {
 		printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
 				"mapping 64k\n");
-		__ioremap_explicit(phb_io_base_phys, (unsigned long)phb_io_base_virt,
-			0x10000, _PAGE_NO_CACHE);
+		__ioremap_explicit(phb_io_base_phys,
+				   (unsigned long)phb_io_base_virt,
+				   0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
 		return;
 	}
 
@@ -576,7 +577,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
 
 		__ioremap_explicit(phb_io_base_phys,
 				   (unsigned long) phb_io_base_virt,
-				   size, _PAGE_NO_CACHE);
+				   size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 	}
 }
 
@@ -692,7 +693,7 @@ void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
 	struct resource *res;
 
 	hose->io_base_virt = __ioremap(hose->io_base_phys, size,
-					_PAGE_NO_CACHE);
+					_PAGE_NO_CACHE | _PAGE_GUARDED);
 	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
 		hose->global_number, hose->io_base_phys,
 		(unsigned long) hose->io_base_virt);
@@ -780,7 +781,8 @@ int remap_bus_range(struct pci_bus *bus)
 	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
 		return 1;
 	printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
-	if (__ioremap_explicit(start_phys, start_virt, size, _PAGE_NO_CACHE))
+	if (__ioremap_explicit(start_phys, start_virt, size,
+			       _PAGE_NO_CACHE | _PAGE_GUARDED))
 		return 1;
 
 	return 0;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 23813d03e1c4..a7149b9fc35c 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -155,7 +155,8 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
 
 		pa = abs_to_phys(pa);
-		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+							  __pgprot(flags)));
 		spin_unlock(&ioremap_mm.page_table_lock);
 	} else {
 		unsigned long va, vpn, hash, hpteg;
@@ -191,12 +192,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
-	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
-		flags |= _PAGE_GUARDED;
 
-	for (i = 0; i < size; i += PAGE_SIZE) {
+	for (i = 0; i < size; i += PAGE_SIZE)
 		map_io_page(ea+i, pa+i, flags);
-	}
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
@@ -205,7 +203,7 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 void __iomem *
 ioremap(unsigned long addr, unsigned long size)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
+	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
 void __iomem *
@@ -272,7 +270,8 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 			return 1;
 		}
 		if (ea != (unsigned long) area->addr) {
-			printk(KERN_ERR "unexpected addr return from im_get_area\n");
+			printk(KERN_ERR "unexpected addr return from "
+			       "im_get_area\n");
 			return 1;
 		}
 	}
@@ -315,7 +314,8 @@ static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
 			continue;
 		if (pte_present(page))
 			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
+		       " table\n");
 	} while (address < end);
 }
 
@@ -352,7 +352,7 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  * Access to IO memory should be serialized by driver.
  * This code is modeled after vmalloc code - unmap_vm_area()
  *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
+ * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void iounmap(volatile void __iomem *token)
 {