author    Paul Mundt <lethal@linux-sh.org>  2006-09-27 02:16:42 -0400
committer Paul Mundt <lethal@linux-sh.org>  2006-09-27 02:16:42 -0400
commit    d7cdc9e8ac82c43fdcd4fde6b5b53d2dcba7f707 (patch)
tree      2489b1831a1b5818af0927e4d3c8933bf7d05f31 /arch/sh/mm/pmb.c
parent    26ff6c11ef38e08990c1e417c299246e6ab18ff7 (diff)
sh: ioremap() overhaul.
ioremap() overhaul. Add support for transparent PMB mapping, get rid of
p3_ioremap(), etc. Also drop ioremap() and iounmap() routines from the
machvec, as everyone can use the generic ioremap() API instead. For PCI
memory apertures and other special cases, use the pci_iomap() API, as
boards are already required to get the mapping right there.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
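As a reading aid, not part of the commit: with the machvec ioremap()/iounmap()
hooks gone, a board's PCI driver maps its aperture through the generic
pci_iomap() API rather than p3_ioremap(). A minimal sketch, assuming a
hypothetical driver; the function name and register write are illustrative:

/*
 * Hypothetical example (not from this commit): mapping a PCI BAR via
 * the generic pci_iomap() API instead of a machvec-private ioremap.
 */
#include <linux/pci.h>
#include <asm/io.h>

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	void __iomem *regs;

	/* Map all of BAR 0; a maxlen of 0 means the whole resource. */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs)
		return -ENOMEM;

	/* MMIO accesses go through the io{read,write}*() accessors. */
	iowrite32(0x1, regs);	/* hypothetical register at offset 0 */

	pci_iounmap(pdev, regs);
	return 0;
}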
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--  arch/sh/mm/pmb.c | 145
1 file changed, 140 insertions(+), 5 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ff5bde745647..819fd0faf022 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2005, 2006 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/io.h>
 
@@ -127,11 +128,15 @@ repeat:
 	return 0;
 }
 
-void set_pmb_entry(struct pmb_entry *pmbe)
+int set_pmb_entry(struct pmb_entry *pmbe)
 {
+	int ret;
+
 	jump_to_P2();
-	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
 	back_to_P1();
+
+	return ret;
 }
 
 void clear_pmb_entry(struct pmb_entry *pmbe)
@@ -162,11 +167,141 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	clear_bit(entry, &pmb_map);
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
+static struct {
+	unsigned long size;
+	int flag;
+} pmb_sizes[] = {
+	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
+	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
+	{ .size = 0x04000000, .flag = PMB_SZ_64M, },
+	{ .size = 0x01000000, .flag = PMB_SZ_16M, },
+};
+
+long pmb_remap(unsigned long vaddr, unsigned long phys,
+	       unsigned long size, unsigned long flags)
+{
+	struct pmb_entry *pmbp;
+	unsigned long wanted;
+	int pmb_flags, i;
+
+	/* Convert typical pgprot value to the PMB equivalent */
+	if (flags & _PAGE_CACHABLE) {
+		if (flags & _PAGE_WT)
+			pmb_flags = PMB_WT;
+		else
+			pmb_flags = PMB_C;
+	} else
+		pmb_flags = PMB_WT | PMB_UB;
+
+	pmbp = NULL;
+	wanted = size;
+
+again:
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+		struct pmb_entry *pmbe;
+		int ret;
+
+		if (size < pmb_sizes[i].size)
+			continue;
+
+		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
+		if (IS_ERR(pmbe))
+			return PTR_ERR(pmbe);
+
+		ret = set_pmb_entry(pmbe);
+		if (ret != 0) {
+			pmb_free(pmbe);
+			return -EBUSY;
+		}
+
+		phys	+= pmb_sizes[i].size;
+		vaddr	+= pmb_sizes[i].size;
+		size	-= pmb_sizes[i].size;
+
+		/*
+		 * Link adjacent entries that span multiple PMB entries
+		 * for easier tear-down.
+		 */
+		if (likely(pmbp))
+			pmbp->link = pmbe;
+
+		pmbp = pmbe;
+	}
+
+	if (size >= 0x1000000)
+		goto again;
+
+	return wanted - size;
+}
+
+void pmb_unmap(unsigned long addr)
+{
+	struct pmb_entry **p, *pmbe;
+
+	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
+		if (pmbe->vpn == addr)
+			break;
+
+	if (unlikely(!pmbe))
+		return;
+
+	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+
+	do {
+		struct pmb_entry *pmblink = pmbe;
+
+		clear_pmb_entry(pmbe);
+		pmbe = pmblink->link;
+
+		pmb_free(pmblink);
+	} while (pmbe);
+}
+
 static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
 {
+	struct pmb_entry *pmbe = pmb;
+
 	memset(pmb, 0, sizeof(struct pmb_entry));
 
-	((struct pmb_entry *)pmb)->entry = PMB_NO_ENTRY;
+	spin_lock_irq(&pmb_list_lock);
+
+	pmbe->entry = PMB_NO_ENTRY;
+	pmb_list_add(pmbe);
+
+	spin_unlock_irq(&pmb_list_lock);
+}
+
+static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+{
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmb);
+	spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
@@ -177,7 +312,7 @@ static int __init pmb_init(void)
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry),
-				      0, 0, pmb_cache_ctor, NULL);
+				      0, 0, pmb_cache_ctor, pmb_cache_dtor);
 	BUG_ON(!pmb_cache);
 
 	jump_to_P2();
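
A note on the new pmb_remap() path, as an illustration rather than part of the
patch: the function greedily carves a request into the largest PMB section
sizes (512MB, 128MB, 64MB, 16MB) that still fit, chaining the resulting
entries through ->link so pmb_unmap() can tear the whole mapping down from
its head vpn. A standalone sketch of the same carving, assuming only the
pmb_sizes[] table shown above; a 0x29000000-byte request, for example,
decomposes into 512MB + 128MB + 16MB:

/*
 * Illustration only: the greedy size carving performed by pmb_remap(),
 * extracted into a standalone program. Sizes mirror pmb_sizes[] above.
 */
#include <stdio.h>

static const unsigned long sizes[] = {
	0x20000000,	/* PMB_SZ_512M */
	0x08000000,	/* PMB_SZ_128M */
	0x04000000,	/* PMB_SZ_64M  */
	0x01000000,	/* PMB_SZ_16M  */
};

int main(void)
{
	unsigned long size = 0x29000000;	/* example request */
	unsigned long wanted = size;
	size_t i;

	/* Same stop condition as pmb_remap(): loop while >= 16MB remains. */
	while (size >= 0x01000000) {
		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
			if (size < sizes[i])
				continue;
			printf("map entry of %#lx\n", sizes[i]);
			size -= sizes[i];
		}
	}

	printf("mapped %#lx of %#lx\n", wanted - size, wanted);
	return 0;
}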