author		Paul Mundt <lethal@linux-sh.org>	2006-01-17 01:14:15 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-17 02:15:28 -0500
commit		b66c1a3919abb40f9bd8fb92a0d9fd77eb899c54
tree		e83c11e63f760e8a3c09ab44e8c951a6df0400b7	/arch/sh/mm/ioremap.c
parent		bf3a00f88c926635932c91afd90b4a0907dfbe78
[PATCH] sh: I/O routine cleanups and ioremap() overhaul

This introduces a few changes in the way that the I/O routines are
defined on SH, specifically so that things like the iomap API properly
wrap through the machvec for board-specific quirks. In addition to
this, the old p3_ioremap() work is converted to a more generic
__ioremap() that will map through the PMB if it's available, or fall
back on page tables for everything else. An alpha-like IO_CONCAT is
also added so we can start to clean up the board-specific io.h mess,
which will be handled in board update patches.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
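From a driver's point of view the overhaul is transparent: callers keep using ioremap() and iounmap(), and the PMB-versus-page-table decision happens inside __ioremap(). A minimal usage sketch, assuming a hypothetical SuperH device with a register block at 0xfe000000 (the address and register offset are illustrative, not taken from this patch):

	#include <linux/errno.h>
	#include <asm/io.h>

	static void __iomem *regs;

	static int example_probe(void)
	{
		/* a small mapping like this falls through to page tables */
		regs = ioremap(0xfe000000, 0x1000);
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x10);	/* hypothetical control register */
		return 0;
	}

	static void example_remove(void)
	{
		iounmap(regs);
	}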
Diffstat (limited to 'arch/sh/mm/ioremap.c')
-rw-r--r--	arch/sh/mm/ioremap.c	99
1 file changed, 81 insertions, 18 deletions
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index e794e27a72f..96fa4a999e2 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -6,13 +6,19 @@
  * 640k-1MB IO memory area on PC's
  *
  * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005, 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
-
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
@@ -80,9 +86,15 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	if (address >= end)
 		BUG();
 	do {
+		pud_t *pud;
 		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
+
 		error = -ENOMEM;
+
+		pud = pud_alloc(&init_mm, dir, address);
+		if (!pud)
+			break;
+		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			break;
 		if (remap_area_pmd(pmd, address, end - address,
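The pud step added above brings remap_area_pages() in line with the kernel's four-level page-table interface: each level is allocated before descending, and any allocation failure unwinds with -ENOMEM. Condensed, the walk the hunk establishes looks like this (a simplified sketch of the surrounding loop body, not a verbatim excerpt):

	pgd_t *dir = pgd_offset_k(address);	/* kernel page-directory slot */
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_alloc(&init_mm, dir, address);
	if (!pud)
		return -ENOMEM;

	pmd = pmd_alloc(&init_mm, pud, address);
	if (!pmd)
		return -ENOMEM;

	/* remap_area_pmd() then fills in the PTEs beneath this pmd */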
@@ -97,10 +109,6 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 }
 
 /*
- * Generic mapping function (not visible outside):
- */
-
-/*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
  * directly.
@@ -109,11 +117,11 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void * addr;
 	struct vm_struct * area;
-	unsigned long offset, last_addr;
+	unsigned long offset, last_addr, addr, orig_addr;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
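The prototype change from void * to void __iomem * is a sparse annotation: the returned cookie is meant to be used only through accessors such as readl()/writel(), and converting it back to an ordinary pointer requires an explicit __force cast, exactly as __iounmap() does below. A small illustration (the address is made up):

	void __iomem *reg = ioremap(0xfe000000, PAGE_SIZE);

	u32 val = readl(reg);		/* correct: go through an accessor */
	/* u32 bad = *(u32 *)reg; */	/* sparse warns: address space mismatch */

	iounmap(reg);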
@@ -124,7 +132,7 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	 * Don't remap the low PCI/ISA area, it's always mapped..
 	 */
 	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-		return phys_to_virt(phys_addr);
+		return (void __iomem *)phys_to_virt(phys_addr);
 
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
@@ -146,16 +154,71 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-		vunmap(addr);
-		return NULL;
+	orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_32BIT
+	/*
+	 * First try to remap through the PMB once a valid VMA has been
+	 * established. Smaller allocations (or the rest of the size
+	 * remaining after a PMB mapping due to the size not being
+	 * perfectly aligned on a PMB size boundary) are then mapped
+	 * through the UTLB using conventional page tables.
+	 *
+	 * PMB entries are all pre-faulted.
+	 */
+	if (unlikely(size >= 0x1000000)) {
+		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+		if (likely(mapped)) {
+			addr		+= mapped;
+			phys_addr	+= mapped;
+			size		-= mapped;
+		}
 	}
-	return (void *) (offset + (char *)addr);
+#endif
+
+	if (likely(size))
+		if (remap_area_pages(addr, phys_addr, size, flags)) {
+			vunmap((void *)orig_addr);
+			return NULL;
+		}
+
+	return (void __iomem *)(offset + (char *)orig_addr);
 }
+EXPORT_SYMBOL(__ioremap);
 
-void p3_iounmap(void *addr)
+void __iounmap(void __iomem *addr)
 {
-	if (addr > high_memory)
-		vfree((void *)(PAGE_MASK & (unsigned long)addr));
+	unsigned long vaddr = (unsigned long __force)addr;
+	struct vm_struct *p;
+
+	if (PXSEG(vaddr) < P3SEG)
+		return;
+
+#ifdef CONFIG_32BIT
+	/*
+	 * Purge any PMB entries that may have been established for this
+	 * mapping, then proceed with conventional VMA teardown.
+	 *
+	 * XXX: Note that due to the way that remove_vm_area() does
+	 * matching of the resultant VMA, we aren't able to fast-forward
+	 * the address past the PMB space until the end of the VMA where
+	 * the page tables reside. As such, unmap_vm_area() will be
+	 * forced to linearly scan over the area until it finds the page
+	 * tables where PTEs that need to be unmapped actually reside,
+	 * which is far from optimal. Perhaps we need to use a separate
+	 * VMA for the PMB mappings?
+	 *				-- PFM.
+	 */
+	pmb_unmap(vaddr);
+#endif
+
+	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+	if (!p) {
+		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
+		return;
+	}
+
+	kfree(p);
 }
+EXPORT_SYMBOL(__iounmap);
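To make the bookkeeping in the CONFIG_32BIT branch of __ioremap() concrete, here is a worked example with illustrative numbers: a 20MB request of which pmb_remap() manages to cover 16MB (actual PMB entry sizes depend on the hardware):

	/*
	 * size       = 0x1400000  (20MB: >= 0x1000000, so the PMB is tried first)
	 * mapped     = 0x1000000  (pmb_remap() covered 16MB, pre-faulted)
	 *
	 * addr      += mapped     (page-table mapping resumes after the PMB span)
	 * phys_addr += mapped
	 * size      -= mapped  -> 0x400000 (4MB) left for remap_area_pages()
	 */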