author     Paul Mundt <lethal@linux-sh.org>    2010-02-23 02:20:53 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2010-03-02 02:40:06 -0500
commit     90e7d649d86f21d478dc134f74c88e19dd472393 (patch)
tree       7526b5320c01da9efd2ce581369b000baa91e0da /arch
parent     94316cdadb0067ba6d1f08b9a6f84fe755bdaa38 (diff)
sh: reworked dynamic PMB mapping.
This implements a fairly significant overhaul of the dynamic PMB mapping code. The primary change here is that the PMB gets its own VMA that follows the uncached mapping, and we attempt to be a bit more intelligent with dynamic sizing, multi-entry mapping, and so forth.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
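As an aside for readers of this commit, here is a minimal usage sketch of the reworked interface, assuming the pmb_remap()/pmb_unmap() signatures introduced in asm/mmu.h below. The physical base is an invented example value, and drivers would normally reach this path through ioremap() rather than calling the PMB layer directly.

/*
 * Hedged example, not part of the patch: map and unmap a PMB-sized
 * region with the new API. EXAMPLE_PHYS_BASE is a made-up address.
 */
#include <linux/err.h>
#include <asm/mmu.h>
#include <asm/sizes.h>

#define EXAMPLE_PHYS_BASE       0x10000000UL    /* invented for illustration */

static void __iomem *example_map(void)
{
        void __iomem *vaddr;

        /* Requests below 16MB are rejected and should use page tables. */
        vaddr = pmb_remap(EXAMPLE_PHYS_BASE, SZ_64M, PAGE_KERNEL_NOCACHE);
        if (!vaddr || IS_ERR(vaddr))
                return NULL;

        return vaddr;
}

static void example_unmap(void __iomem *vaddr)
{
        /* Returns 0 when a PMB mapping was found and torn down. */
        pmb_unmap(vaddr);
}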
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/include/asm/io.h     |  23
-rw-r--r--  arch/sh/include/asm/mmu.h    |  31
-rw-r--r--  arch/sh/mm/ioremap.c         |  70
-rw-r--r--  arch/sh/mm/ioremap_fixed.c   |  11
-rw-r--r--  arch/sh/mm/pmb.c             | 256
5 files changed, 223 insertions, 168 deletions
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 7dab7b23a5ec..f689554e17c1 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr,
  * doesn't exist, so everything must go through page tables.
  */
 #ifdef CONFIG_MMU
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
                                pgprot_t prot, void *caller);
 void __iounmap(void __iomem *addr);
 
 static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
 {
         return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
 }
 
 static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 {
 #ifdef CONFIG_29BIT
-        unsigned long last_addr = offset + size - 1;
+        phys_addr_t last_addr = offset + size - 1;
 
         /*
          * For P1 and P2 space this is trivial, as everything is already
@@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
 }
 
 static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
 {
         void __iomem *ret;
 
@@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
 #define __iounmap(addr)         do { } while (0)
 #endif /* CONFIG_MMU */
 
-static inline void __iomem *
-ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
         return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
 }
 
 static inline void __iomem *
-ioremap_cache(unsigned long offset, unsigned long size)
+ioremap_cache(phys_addr_t offset, unsigned long size)
 {
         return __ioremap_mode(offset, size, PAGE_KERNEL);
 }
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 static inline void __iomem *
-ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
 {
         return __ioremap_mode(offset, size, __pgprot(flags));
 }
 #endif
 
 #ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
-                                   unsigned long, pgprot_t);
+extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
 extern int iounmap_fixed(void __iomem *);
 extern void ioremap_fixed_init(void);
 #else
 static inline void __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
-              unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
 {
         BUG();
         return NULL;
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 15a05b615ba7..19fe84550b49 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -55,19 +55,29 @@ typedef struct {
 
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
-long pmb_remap(unsigned long virt, unsigned long phys,
-               unsigned long size, pgprot_t prot);
-void pmb_unmap(unsigned long addr);
-void pmb_init(void);
 bool __in_29bit_mode(void);
+
+void pmb_init(void);
+int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
+                     unsigned long size, pgprot_t prot);
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+                               pgprot_t prot, void *caller);
+int pmb_unmap(void __iomem *addr);
+
 #else
-static inline long pmb_remap(unsigned long virt, unsigned long phys,
-                             unsigned long size, pgprot_t prot)
+
+static inline void __iomem *
+pmb_remap_caller(phys_addr_t phys, unsigned long size,
+                 pgprot_t prot, void *caller)
+{
+        return NULL;
+}
+
+static inline int pmb_unmap(void __iomem *addr)
 {
         return -EINVAL;
 }
 
-#define pmb_unmap(addr)         do { } while (0)
 #define pmb_init(addr)          do { } while (0)
 
 #ifdef CONFIG_29BIT
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
 #endif
 
 #endif /* CONFIG_PMB */
+
+static inline void __iomem *
+pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
+{
+        return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
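One design note on the header above, with a small illustrative snippet (not part of the patch): pmb_remap() is a static inline wrapper so that the call site recorded for the mapping's VMA is the original caller, in the same way __ioremap() hands its return address down to __ioremap_caller(). The driver function name and address below are invented.

static void __iomem *example_driver_map(void)
{
        /*
         * The __builtin_return_address(0) captured inside pmb_remap()
         * points here, so the VMA created by __get_vm_area_caller() is
         * attributed to this caller.
         */
        return pmb_remap(0x18000000, SZ_16M, PAGE_KERNEL_NOCACHE);
}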
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index c68d2d7d00a9..1ab2385ecefe 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -34,11 +34,12 @@
  * caller shouldn't need to know that small detail.
  */
 void __iomem * __init_refok
-__ioremap_caller(unsigned long phys_addr, unsigned long size,
+__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
                  pgprot_t pgprot, void *caller)
 {
         struct vm_struct *area;
         unsigned long offset, last_addr, addr, orig_addr;
+        void __iomem *mapped;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
@@ -46,6 +47,20 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
                 return NULL;
 
         /*
+         * If we can't yet use the regular approach, go the fixmap route.
+         */
+        if (!mem_init_done)
+                return ioremap_fixed(phys_addr, size, pgprot);
+
+        /*
+         * First try to remap through the PMB.
+         * PMB entries are all pre-faulted.
+         */
+        mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+        if (mapped && !IS_ERR(mapped))
+                return mapped;
+
+        /*
          * Mappings have to be page-aligned
          */
         offset = phys_addr & ~PAGE_MASK;
@@ -53,12 +68,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
         size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
         /*
-         * If we can't yet use the regular approach, go the fixmap route.
-         */
-        if (!mem_init_done)
-                return ioremap_fixed(phys_addr, offset, size, pgprot);
-
-        /*
          * Ok, go for it..
          */
         area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
         area->phys_addr = phys_addr;
         orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_PMB
-        /*
-         * First try to remap through the PMB once a valid VMA has been
-         * established. Smaller allocations (or the rest of the size
-         * remaining after a PMB mapping due to the size not being
-         * perfectly aligned on a PMB size boundary) are then mapped
-         * through the UTLB using conventional page tables.
-         *
-         * PMB entries are all pre-faulted.
-         */
-        if (unlikely(phys_addr >= P1SEG)) {
-                unsigned long mapped;
-
-                mapped = pmb_remap(addr, phys_addr, size, pgprot);
-                if (likely(mapped)) {
-                        addr            += mapped;
-                        phys_addr       += mapped;
-                        size            -= mapped;
-                }
+        if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+                vunmap((void *)orig_addr);
+                return NULL;
         }
-#endif
-
-        if (likely(size))
-                if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-                        vunmap((void *)orig_addr);
-                        return NULL;
-                }
 
         return (void __iomem *)(offset + (char *)orig_addr);
 }
@@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr)
         if (iounmap_fixed(addr) == 0)
                 return;
 
-#ifdef CONFIG_PMB
         /*
-         * Purge any PMB entries that may have been established for this
-         * mapping, then proceed with conventional VMA teardown.
-         *
-         * XXX: Note that due to the way that remove_vm_area() does
-         * matching of the resultant VMA, we aren't able to fast-forward
-         * the address past the PMB space until the end of the VMA where
-         * the page tables reside. As such, unmap_vm_area() will be
-         * forced to linearly scan over the area until it finds the page
-         * tables where PTEs that need to be unmapped actually reside,
-         * which is far from optimal. Perhaps we need to use a separate
-         * VMA for the PMB mappings?
-         * -- PFM.
+         * If the PMB handled it, there's nothing else to do.
          */
-        pmb_unmap(vaddr);
-#endif
+        if (pmb_unmap(addr) == 0)
+                return;
 
         p = remove_vm_area((void *)(vaddr & PAGE_MASK));
         if (!p) {
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 0b78b1e20ef1..7f682e5dafcf 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void)
 }
 
 void __init __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
-              unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
 {
         enum fixed_addresses idx0, idx;
         struct ioremap_map *map;
         unsigned int nrpages;
+        unsigned long offset;
         int i, slot;
 
+        /*
+         * Mappings have to be page-aligned
+         */
+        offset = phys_addr & ~PAGE_MASK;
+        phys_addr &= PAGE_MASK;
+        size = PAGE_ALIGN(phys_addr + size) - phys_addr;
+
         slot = -1;
         for (i = 0; i < FIX_N_IOREMAPS; i++) {
                 map = &ioremap_maps[i];
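To make the alignment arithmetic added to ioremap_fixed() above concrete, here is a small standalone userspace sketch, not part of the patch, that reproduces the same calculation for an invented, non-page-aligned address:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long phys_addr = 0x1fe00234UL; /* invented, not page-aligned */
        unsigned long size = 0x100UL;
        unsigned long offset;

        /* Same steps as the new ioremap_fixed() prologue. */
        offset = phys_addr & ~PAGE_MASK;                 /* 0x234 */
        phys_addr &= PAGE_MASK;                          /* 0x1fe00000 */
        size = PAGE_ALIGN(phys_addr + size) - phys_addr; /* 0x1000, one page */

        printf("offset=%#lx phys=%#lx size=%#lx\n", offset, phys_addr, size);
        return 0;
}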
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 35b364f931ea..9a516b89839a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,6 +23,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -51,6 +52,16 @@ struct pmb_entry {
         struct pmb_entry *link;
 };
 
+static struct {
+        unsigned long size;
+        int flag;
+} pmb_sizes[] = {
+        { .size = SZ_512M, .flag = PMB_SZ_512M, },
+        { .size = SZ_128M, .flag = PMB_SZ_128M, },
+        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
+        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
+};
+
 static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
@@ -72,6 +83,88 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
         return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+        unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+        flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+        flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+        flags |= PMB_C;
+#endif
+
+        return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+        unsigned long pmb_flags = 0;
+        u64 flags = pgprot_val(prot);
+
+        if (flags & _PAGE_CACHABLE)
+                pmb_flags |= PMB_C;
+        if (flags & _PAGE_WT)
+                pmb_flags |= PMB_WT | PMB_UB;
+
+        return pmb_flags;
+}
+
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+        return (b->vpn == (a->vpn + a->size)) &&
+               (b->ppn == (a->ppn + a->size)) &&
+               (b->flags == a->flags);
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return true;
+
+        return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+        return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return pmb_sizes[i].flag;
+
+        return 0;
+}
+
 static int pmb_alloc_entry(void)
 {
         int pos;
@@ -139,33 +232,13 @@ static void pmb_free(struct pmb_entry *pmbe)
 }
 
 /*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
- */
-static __always_inline unsigned long pmb_cache_flags(void)
-{
-        unsigned long flags = 0;
-
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-        flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-        flags |= PMB_C;
-#endif
-
-        return flags;
-}
-
-/*
  * Must be run uncached.
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-        writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
-        writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
-                        mk_pmb_data(pmbe->entry));
+        /* Set V-bit */
+        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
+        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 }
 
 static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -193,39 +266,56 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
         spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 
-static struct {
-        unsigned long size;
-        int flag;
-} pmb_sizes[] = {
-        { .size = SZ_512M, .flag = PMB_SZ_512M, },
-        { .size = SZ_128M, .flag = PMB_SZ_128M, },
-        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
-        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
-};
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+                     unsigned long size, pgprot_t prot)
+{
+        return 0;
+}
 
-long pmb_remap(unsigned long vaddr, unsigned long phys,
-               unsigned long size, pgprot_t prot)
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+                               pgprot_t prot, void *caller)
 {
         struct pmb_entry *pmbp, *pmbe;
-        unsigned long wanted;
-        int pmb_flags, i;
-        long err;
-        u64 flags;
+        unsigned long pmb_flags;
+        int i, mapped;
+        unsigned long orig_addr, vaddr;
+        phys_addr_t offset, last_addr;
+        phys_addr_t align_mask;
+        unsigned long aligned;
+        struct vm_struct *area;
 
-        flags = pgprot_val(prot);
+        /*
+         * Small mappings need to go through the TLB.
+         */
+        if (size < SZ_16M)
+                return ERR_PTR(-EINVAL);
+        if (!pmb_prot_valid(prot))
+                return ERR_PTR(-EINVAL);
 
-        pmb_flags = PMB_WT | PMB_UB;
+        pmbp = NULL;
+        pmb_flags = pgprot_to_pmb_flags(prot);
+        mapped = 0;
 
-        /* Convert typical pgprot value to the PMB equivalent */
-        if (flags & _PAGE_CACHABLE) {
-                pmb_flags |= PMB_C;
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (size >= pmb_sizes[i].size)
+                        break;
 
-                if ((flags & _PAGE_WT) == 0)
-                        pmb_flags &= ~(PMB_WT | PMB_UB);
-        }
+        last_addr = phys + size;
+        align_mask = ~(pmb_sizes[i].size - 1);
+        offset = phys & ~align_mask;
+        phys &= align_mask;
+        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
 
-        pmbp = NULL;
-        wanted = size;
+        area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+                                    P3SEG, caller);
+        if (!area)
+                return NULL;
+
+        area->phys_addr = phys;
+        orig_addr = vaddr = (unsigned long)area->addr;
+
+        if (!pmb_addr_valid(vaddr, aligned))
+                return ERR_PTR(-EFAULT);
 
 again:
         for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
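As a side note, the dynamic sizing mentioned in the commit message shows up in the hunk above and in the loop that follows. A small standalone userspace sketch of the same selection pattern, with an invented 80MB request, illustrates how one request is broken into multiple PMB entries:

#include <stdio.h>

#define SZ_16M  0x01000000UL
#define SZ_64M  0x04000000UL
#define SZ_128M 0x08000000UL
#define SZ_512M 0x20000000UL

static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

int main(void)
{
        unsigned long phys = 0x10000000UL;      /* invented, suitably aligned */
        unsigned long size = SZ_64M + SZ_16M;   /* 80MB request */
        unsigned int i;

        /* Mirrors the "again:" loop: largest fitting entry first, repeat. */
        while (size >= SZ_16M) {
                for (i = 0; i < sizeof(pmb_sizes) / sizeof(pmb_sizes[0]); i++)
                        if (size >= pmb_sizes[i])
                                break;

                printf("PMB entry: phys=%#lx size=%#lx\n", phys, pmb_sizes[i]);
                phys += pmb_sizes[i];
                size -= pmb_sizes[i];
        }
        return 0;
}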
@@ -237,19 +327,19 @@ again:
                 pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                  PMB_NO_ENTRY);
                 if (IS_ERR(pmbe)) {
-                        err = PTR_ERR(pmbe);
-                        goto out;
+                        pmb_unmap_entry(pmbp, mapped);
+                        return pmbe;
                 }
 
                 spin_lock_irqsave(&pmbe->lock, flags);
 
+                pmbe->size = pmb_sizes[i].size;
+
                 __set_pmb_entry(pmbe);
 
-                phys    += pmb_sizes[i].size;
-                vaddr   += pmb_sizes[i].size;
-                size    -= pmb_sizes[i].size;
-
-                pmbe->size = pmb_sizes[i].size;
+                phys    += pmbe->size;
+                vaddr   += pmbe->size;
+                size    -= pmbe->size;
 
                 /*
                  * Link adjacent entries that span multiple PMB entries
@@ -269,6 +359,7 @@ again:
                  * pmb_sizes[i].size again.
                  */
                 i--;
+                mapped++;
 
                 spin_unlock_irqrestore(&pmbe->lock, flags);
         }
@@ -276,61 +367,35 @@ again:
         if (size >= SZ_16M)
                 goto again;
 
-        return wanted - size;
-
-out:
-        pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
-
-        return err;
+        return (void __iomem *)(offset + (char *)orig_addr);
 }
 
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
 {
         struct pmb_entry *pmbe = NULL;
-        int i;
+        unsigned long vaddr = (unsigned long __force)addr;
+        int i, found = 0;
 
         read_lock(&pmb_rwlock);
 
         for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                 if (test_bit(i, pmb_map)) {
                         pmbe = &pmb_entry_list[i];
-                        if (pmbe->vpn == addr)
+                        if (pmbe->vpn == vaddr) {
+                                found = 1;
                                 break;
+                        }
                 }
         }
 
         read_unlock(&pmb_rwlock);
 
-        pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
-        return (b->vpn == (a->vpn + a->size)) &&
-               (b->ppn == (a->ppn + a->size)) &&
-               (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-                if (pmb_sizes[i].size == size)
-                        return true;
-
-        return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-                if (pmb_sizes[i].size == size)
-                        return pmb_sizes[i].flag;
+        if (found) {
+                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+                return 0;
+        }
 
-        return 0;
+        return -EINVAL;
 }
 
 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
@@ -368,11 +433,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
         write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
-        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
 static void __init pmb_notify(void)
 {
         int i;