Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	412
1 file changed, 285 insertions(+), 127 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 198bcff5e96f..a4662e2782c3 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,7 +23,8 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
-#include <linux/rwlock.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
 #include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -52,12 +53,24 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
+static struct {
+	unsigned long size;
+	int flag;
+} pmb_sizes[] = {
+	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
+	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
+	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
+	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
+};
+
 static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
 
+static unsigned int pmb_iomapping_enabled;
+
 static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -73,6 +86,142 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+	unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+	flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+	flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	flags |= PMB_C;
+#endif
+
+	return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+	unsigned long pmb_flags = 0;
+	u64 flags = pgprot_val(prot);
+
+	if (flags & _PAGE_CACHABLE)
+		pmb_flags |= PMB_C;
+	if (flags & _PAGE_WT)
+		pmb_flags |= PMB_WT | PMB_UB;
+
+	return pmb_flags;
+}
+
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
+
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+			       unsigned long size)
+{
+	int i;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe, *iter;
+		unsigned long span;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * See if VPN and PPN are bounded by an existing mapping.
+		 */
+		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+			continue;
+		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+			continue;
+
+		/*
+		 * Now see if we're in range of a simple mapping.
+		 */
+		if (size <= pmbe->size) {
+			read_unlock(&pmb_rwlock);
+			return true;
+		}
+
+		span = pmbe->size;
+
+		/*
+		 * Finally for sizes that involve compound mappings, walk
+		 * the chain.
+		 */
+		for (iter = pmbe->link; iter; iter = iter->link)
+			span += iter->size;
+
+		/*
+		 * Nothing else to do if the range requirements are met.
+		 */
+		if (size <= span) {
+			read_unlock(&pmb_rwlock);
+			return true;
+		}
+	}
+
+	read_unlock(&pmb_rwlock);
+	return false;
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+	return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
+
+	return 0;
+}
+
 static int pmb_alloc_entry(void)
 {
 	int pos;
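
The helpers added above give pmb_mapping_exists() its compound-mapping semantics: a request is treated as already satisfied when its start address falls inside an existing entry and the sizes accumulated along the ->link chain cover the requested length. A minimal standalone sketch of that span walk (not part of the patch; it uses a cut-down toy_entry type and illustrative addresses, and checks only the virtual side):

	#include <stdbool.h>
	#include <stdio.h>

	/* Cut-down stand-in for struct pmb_entry: only what the walk needs. */
	struct toy_entry {
		unsigned long vpn, size;
		struct toy_entry *link;
	};

	/* Mirrors the span accumulation done in pmb_mapping_exists(). */
	static bool covers(struct toy_entry *head, unsigned long vaddr,
			   unsigned long size)
	{
		struct toy_entry *iter;
		unsigned long span = head->size;

		if (vaddr < head->vpn || vaddr >= head->vpn + head->size)
			return false;
		for (iter = head->link; iter; iter = iter->link)
			span += iter->size;
		return size <= span;
	}

	int main(void)
	{
		/* Two linked 16 MiB entries forming one 32 MiB compound mapping. */
		struct toy_entry b = { 0xa1000000UL, 0x1000000UL, NULL };
		struct toy_entry a = { 0xa0000000UL, 0x1000000UL, &b };

		printf("%d\n", covers(&a, 0xa0000000UL, 0x2000000UL)); /* 1: fits the chain */
		printf("%d\n", covers(&a, 0xa0000000UL, 0x4000000UL)); /* 0: 64 MiB exceeds it */
		return 0;
	}
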
@@ -140,33 +289,22 @@ static void pmb_free(struct pmb_entry *pmbe)
 }
 
 /*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
+ * Must be run uncached.
  */
-static __always_inline unsigned long pmb_cache_flags(void)
+static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	unsigned long flags = 0;
+	unsigned long addr, data;
 
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-	flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-	flags |= PMB_C;
-#endif
+	addr = mk_pmb_addr(pmbe->entry);
+	data = mk_pmb_data(pmbe->entry);
 
-	return flags;
-}
+	jump_to_uncached();
 
-/*
- * Must be run uncached.
- */
-static void __set_pmb_entry(struct pmb_entry *pmbe)
-{
-	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
-	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
-			mk_pmb_data(pmbe->entry));
+	/* Set V-bit */
+	__raw_writel(pmbe->vpn | PMB_V, addr);
+	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
+
+	back_to_cached();
 }
 
 static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -194,144 +332,155 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 	spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 
-static struct {
-	unsigned long size;
-	int flag;
-} pmb_sizes[] = {
-	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
-	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
-	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
-	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
-};
-
-long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, pgprot_t prot)
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+		     unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
-	unsigned long wanted;
-	int pmb_flags, i;
-	long err;
-	u64 flags;
+	unsigned long orig_addr, orig_size;
+	unsigned long flags, pmb_flags;
+	int i, mapped;
 
-	flags = pgprot_val(prot);
+	if (!pmb_addr_valid(vaddr, size))
+		return -EFAULT;
+	if (pmb_mapping_exists(vaddr, phys, size))
+		return 0;
 
-	pmb_flags = PMB_WT | PMB_UB;
-
-	/* Convert typical pgprot value to the PMB equivalent */
-	if (flags & _PAGE_CACHABLE) {
-		pmb_flags |= PMB_C;
+	orig_addr = vaddr;
+	orig_size = size;
 
-		if ((flags & _PAGE_WT) == 0)
-			pmb_flags &= ~(PMB_WT | PMB_UB);
-	}
+	flush_tlb_kernel_range(vaddr, vaddr + size);
 
+	pmb_flags = pgprot_to_pmb_flags(prot);
 	pmbp = NULL;
-	wanted = size;
 
-again:
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		unsigned long flags;
+	do {
+		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+			if (size < pmb_sizes[i].size)
+				continue;
+
+			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+					 pmb_sizes[i].flag, PMB_NO_ENTRY);
+			if (IS_ERR(pmbe)) {
+				pmb_unmap_entry(pmbp, mapped);
+				return PTR_ERR(pmbe);
+			}
 
-		if (size < pmb_sizes[i].size)
-			continue;
+			spin_lock_irqsave(&pmbe->lock, flags);
 
-		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
-				 PMB_NO_ENTRY);
-		if (IS_ERR(pmbe)) {
-			err = PTR_ERR(pmbe);
-			goto out;
-		}
+			pmbe->size = pmb_sizes[i].size;
 
-		spin_lock_irqsave(&pmbe->lock, flags);
+			__set_pmb_entry(pmbe);
 
-		__set_pmb_entry(pmbe);
+			phys	+= pmbe->size;
+			vaddr	+= pmbe->size;
+			size	-= pmbe->size;
 
-		phys	+= pmb_sizes[i].size;
-		vaddr	+= pmb_sizes[i].size;
-		size	-= pmb_sizes[i].size;
+			/*
+			 * Link adjacent entries that span multiple PMB
+			 * entries for easier tear-down.
+			 */
+			if (likely(pmbp)) {
+				spin_lock(&pmbp->lock);
+				pmbp->link = pmbe;
+				spin_unlock(&pmbp->lock);
+			}
 
-		pmbe->size = pmb_sizes[i].size;
+			pmbp = pmbe;
 
-		/*
-		 * Link adjacent entries that span multiple PMB entries
-		 * for easier tear-down.
-		 */
-		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
-			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
+			/*
+			 * Instead of trying smaller sizes on every
+			 * iteration (even if we succeed in allocating
+			 * space), try using pmb_sizes[i].size again.
+			 */
+			i--;
+			mapped++;
+
+			spin_unlock_irqrestore(&pmbe->lock, flags);
 		}
+	} while (size >= SZ_16M);
 
-		pmbp = pmbe;
+	flush_cache_vmap(orig_addr, orig_addr + orig_size);
 
-		/*
-		 * Instead of trying smaller sizes on every iteration
-		 * (even if we succeed in allocating space), try using
-		 * pmb_sizes[i].size again.
-		 */
-		i--;
-
-		spin_unlock_irqrestore(&pmbe->lock, flags);
-	}
+	return 0;
+}
+
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+			       pgprot_t prot, void *caller)
+{
+	unsigned long vaddr;
+	phys_addr_t offset, last_addr;
+	phys_addr_t align_mask;
+	unsigned long aligned;
+	struct vm_struct *area;
+	int i, ret;
 
-	if (size >= SZ_16M)
-		goto again;
+	if (!pmb_iomapping_enabled)
+		return NULL;
 
-	return wanted - size;
+	/*
+	 * Small mappings need to go through the TLB.
+	 */
+	if (size < SZ_16M)
+		return ERR_PTR(-EINVAL);
+	if (!pmb_prot_valid(prot))
+		return ERR_PTR(-EINVAL);
 
-out:
-	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (size >= pmb_sizes[i].size)
+			break;
+
+	last_addr = phys + size;
+	align_mask = ~(pmb_sizes[i].size - 1);
+	offset = phys & ~align_mask;
+	phys &= align_mask;
+	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
+
+	/*
+	 * XXX: This should really start from uncached_end, but this
+	 * causes the MMU to reset, so for now we restrict it to the
+	 * 0xb000...0xc000 range.
+	 */
+	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
+				    P3SEG, caller);
+	if (!area)
+		return NULL;
+
+	area->phys_addr = phys;
+	vaddr = (unsigned long)area->addr;
+
+	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
+	if (unlikely(ret != 0))
+		return ERR_PTR(ret);
 
-	return err;
+	return (void __iomem *)(offset + (char *)vaddr);
 }
 
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
 {
 	struct pmb_entry *pmbe = NULL;
-	int i;
+	unsigned long vaddr = (unsigned long __force)addr;
+	int i, found = 0;
 
 	read_lock(&pmb_rwlock);
 
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
-			if (pmbe->vpn == addr)
+			if (pmbe->vpn == vaddr) {
+				found = 1;
 				break;
+			}
 		}
 	}
 
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
-	return (b->vpn == (a->vpn + a->size)) &&
-	       (b->ppn == (a->ppn + a->size)) &&
-	       (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-		if (pmb_sizes[i].size == size)
-			return true;
-
-	return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-		if (pmb_sizes[i].size == size)
-			return pmb_sizes[i].flag;
+	if (found) {
+		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+		return 0;
+	}
 
-	return 0;
+	return -EINVAL;
 }
 
 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
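
pmb_bolt_mapping() above decomposes a request greedily: each pass of the do/while loop takes the largest pmb_sizes[] entry that still fits, programs one PMB slot, counts it in mapped, and retries the same size (the i-- step) until less than SZ_16M remains. A standalone sketch of just that size selection, using the same 512M/128M/64M/16M table; the request size is a hypothetical example:

	#include <stdio.h>

	/* Same size table the patch keeps in pmb_sizes[], largest first. */
	static const unsigned long sizes[] = {
		512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
	};

	int main(void)
	{
		unsigned long size = 208UL << 20;	/* hypothetical 208 MiB request */
		int i, n = sizeof(sizes) / sizeof(sizes[0]);

		do {
			for (i = 0; i < n; i++) {
				if (size < sizes[i])
					continue;
				printf("map %lu MiB chunk\n", sizes[i] >> 20);
				size -= sizes[i];
				i--;	/* retry the same size on the next pass */
			}
		} while (size >= (16UL << 20));

		/* Prints 128, 64, 16: the request would consume three PMB entries. */
		return 0;
	}
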
@@ -351,6 +500,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
 		 */
 		__clear_pmb_entry(pmbe);
 
+		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
+
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
@@ -369,11 +520,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
-	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
 static void __init pmb_notify(void)
 {
 	int i;
@@ -625,6 +771,18 @@ static void __init pmb_resize(void)
 }
 #endif
 
+static int __init early_pmb(char *p)
+{
+	if (!p)
+		return 0;
+
+	if (strstr(p, "iomap"))
+		pmb_iomapping_enabled = 1;
+
+	return 0;
+}
+early_param("pmb", early_pmb);
+
 void __init pmb_init(void)
 {
 	/* Synchronize software state */
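
With early_pmb() wired up through early_param(), PMB-backed iomappings stay disabled unless the kernel command line carries a "pmb=" argument containing "iomap"; only then does pmb_remap_caller() attempt to bolt large (16 MiB and up) ranges, otherwise it returns NULL and the caller is expected to fall back to the normal page-table ioremap path. A hypothetical boot line (everything apart from pmb=iomap is illustrative):

	console=ttySC0,115200 root=/dev/nfs ip=dhcp pmb=iomap
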
@@ -713,7 +871,7 @@ static int __init pmb_debugfs_init(void)
 
 	return 0;
 }
-postcore_initcall(pmb_debugfs_init);
+subsys_initcall(pmb_debugfs_init);
 
 #ifdef CONFIG_PM
 static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)