author     Paul Mundt <lethal@linux-sh.org>    2010-03-02 23:13:25 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2010-03-02 23:13:25 -0500
commit     a1042aa248e4ea7f39d5ce13f080cbf3b6c42618 (patch)
tree       6802af61a1e7e8072dad5bbef62b1e3fe1be1eba /arch
parent     6eb3c735d29e799810ce82118f9260d0044327b7 (diff)
sh: check for existing mappings for bolted PMB entries.
When entries are being bolted unconditionally it's possible that the
boot loader has established mappings that are within range that we
don't want to clobber. Perform some basic validation to ensure that
the new mapping is out of range before allowing the entry setup to
take place.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
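In outline, the added check treats a bolt request as already satisfied when
both its virtual and physical start addresses fall inside an existing PMB
entry and that entry, or the chain of entries linked to it, spans at least
the requested size. The following is a minimal standalone sketch of that
range test only; the type and names (pmb_entry_model, mapping_exists) are
illustrative stand-ins, and the real code additionally takes pmb_rwlock and
consults the pmb_map bitmap before touching pmb_entry_list:

	/* Editorial sketch, not kernel code: simplified model of the overlap check. */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct pmb_entry_model {
		unsigned long vpn;		/* virtual start of the mapping */
		unsigned long ppn;		/* physical start of the mapping */
		unsigned long size;		/* size covered by this entry alone */
		struct pmb_entry_model *link;	/* next entry of a compound mapping */
	};

	/* True if an existing (possibly compound) mapping already covers the request. */
	static bool mapping_exists(const struct pmb_entry_model *entries, size_t nr,
				   unsigned long vaddr, unsigned long phys,
				   unsigned long size)
	{
		size_t i;

		for (i = 0; i < nr; i++) {
			const struct pmb_entry_model *e = &entries[i], *iter;
			unsigned long span = e->size;

			/* Both start addresses must fall inside this entry... */
			if (vaddr < e->vpn || vaddr >= e->vpn + e->size)
				continue;
			if (phys < e->ppn || phys >= e->ppn + e->size)
				continue;

			/* ...and the entry, or its linked chain, must span the size. */
			for (iter = e->link; iter; iter = iter->link)
				span += iter->size;

			if (size <= span)
				return true;
		}

		return false;
	}

	int main(void)
	{
		/* One bolted 128MB entry, as a boot loader might have set up. */
		struct pmb_entry_model bolted = {
			.vpn = 0xa0000000UL, .ppn = 0x40000000UL,
			.size = 128UL << 20, .link = NULL,
		};

		/* A 64MB request inside it is already covered -> prints 1. */
		printf("%d\n", mapping_exists(&bolted, 1, 0xa0000000UL,
					      0x40000000UL, 64UL << 20));
		return 0;
	}

With such a helper in place, pmb_bolt_mapping() can simply return 0 when the
requested mapping already exists, which is what the patch below does with the
new pmb_mapping_exists() check.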
Diffstat (limited to 'arch')
-rw-r--r--   arch/sh/mm/pmb.c   140
1 file changed, 96 insertions(+), 44 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 55d21902d707..75b8861ec624 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -128,13 +128,67 @@ static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
 	return pmb_flags;
 }
 
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
 {
 	return (b->vpn == (a->vpn + a->size)) &&
 	       (b->ppn == (a->ppn + a->size)) &&
 	       (b->flags == a->flags);
 }
 
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+			       unsigned long size)
+{
+	int i;
+
+	read_lock(&pmb_rwlock);
+
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		struct pmb_entry *pmbe, *iter;
+		unsigned long span;
+
+		if (!test_bit(i, pmb_map))
+			continue;
+
+		pmbe = &pmb_entry_list[i];
+
+		/*
+		 * See if VPN and PPN are bounded by an existing mapping.
+		 */
+		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+			continue;
+		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+			continue;
+
+		/*
+		 * Now see if we're in range of a simple mapping.
+		 */
+		if (size <= pmbe->size) {
+			read_unlock(&pmb_rwlock);
+			return true;
+		}
+
+		span = pmbe->size;
+
+		/*
+		 * Finally for sizes that involve compound mappings, walk
+		 * the chain.
+		 */
+		for (iter = pmbe->link; iter; iter = iter->link)
+			span += iter->size;
+
+		/*
+		 * Nothing else to do if the range requirements are met.
+		 */
+		if (size <= span) {
+			read_unlock(&pmb_rwlock);
+			return true;
+		}
+	}
+
+	read_unlock(&pmb_rwlock);
+	return false;
+}
+
 static bool pmb_size_valid(unsigned long size)
 {
 	int i;
@@ -272,64 +326,62 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
 		     unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
-	unsigned long pmb_flags;
+	unsigned long flags, pmb_flags;
 	int i, mapped;
 
 	if (!pmb_addr_valid(vaddr, size))
 		return -EFAULT;
+	if (pmb_mapping_exists(vaddr, phys, size))
+		return 0;
 
 	pmb_flags = pgprot_to_pmb_flags(prot);
 	pmbp = NULL;
 
-again:
-	for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		unsigned long flags;
-
-		if (size < pmb_sizes[i].size)
-			continue;
-
-		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
-				 PMB_NO_ENTRY);
-		if (IS_ERR(pmbe)) {
-			pmb_unmap_entry(pmbp, mapped);
-			return PTR_ERR(pmbe);
-		}
-
-		spin_lock_irqsave(&pmbe->lock, flags);
+	do {
+		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+			if (size < pmb_sizes[i].size)
+				continue;
+
+			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+					 pmb_sizes[i].flag, PMB_NO_ENTRY);
+			if (IS_ERR(pmbe)) {
+				pmb_unmap_entry(pmbp, mapped);
+				return PTR_ERR(pmbe);
+			}
 
-		pmbe->size = pmb_sizes[i].size;
+			spin_lock_irqsave(&pmbe->lock, flags);
 
-		__set_pmb_entry(pmbe);
+			pmbe->size = pmb_sizes[i].size;
 
-		phys	+= pmbe->size;
-		vaddr	+= pmbe->size;
-		size	-= pmbe->size;
+			__set_pmb_entry(pmbe);
 
-		/*
-		 * Link adjacent entries that span multiple PMB entries
-		 * for easier tear-down.
-		 */
-		if (likely(pmbp)) {
-			spin_lock(&pmbp->lock);
-			pmbp->link = pmbe;
-			spin_unlock(&pmbp->lock);
-		}
+			phys	+= pmbe->size;
+			vaddr	+= pmbe->size;
+			size	-= pmbe->size;
 
-		pmbp = pmbe;
+			/*
+			 * Link adjacent entries that span multiple PMB
+			 * entries for easier tear-down.
+			 */
+			if (likely(pmbp)) {
+				spin_lock(&pmbp->lock);
+				pmbp->link = pmbe;
+				spin_unlock(&pmbp->lock);
+			}
 
-		/*
-		 * Instead of trying smaller sizes on every iteration
-		 * (even if we succeed in allocating space), try using
-		 * pmb_sizes[i].size again.
-		 */
-		i--;
-		mapped++;
+			pmbp = pmbe;
 
-		spin_unlock_irqrestore(&pmbe->lock, flags);
-	}
+			/*
+			 * Instead of trying smaller sizes on every
+			 * iteration (even if we succeed in allocating
+			 * space), try using pmb_sizes[i].size again.
+			 */
+			i--;
+			mapped++;
 
-	if (size >= SZ_16M)
-		goto again;
+			spin_unlock_irqrestore(&pmbe->lock, flags);
+		}
+	} while (size >= SZ_16M);
 
 	return 0;
 }
@@ -374,7 +426,7 @@ void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
 	orig_addr = vaddr = (unsigned long)area->addr;
 
 	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
-	if (ret != 0)
+	if (unlikely(ret != 0))
 		return ERR_PTR(ret);
 
 	return (void __iomem *)(offset + (char *)orig_addr);
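Beyond the new existence check, the second hunk also restructures the
allocation loop: instead of a goto back to an again: label, the loop now
greedily picks the largest PMB entry size that still fits, re-tries that same
size (the i--) until it no longer fits, and repeats while at least 16MB
remains. A toy model of just that size-selection behaviour, assuming the
usual 16MB/64MB/128MB/512MB PMB entry sizes and leaving out allocation,
locking and the hardware programming (pmb_sizes_model and the addresses used
are illustrative):

	/* Editorial toy, not kernel code: models only the size-selection loop. */
	#include <stdio.h>

	#define SZ_16M	(16UL << 20)

	static const unsigned long pmb_sizes_model[] = {
		512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
	};

	int main(void)
	{
		const int nr = sizeof(pmb_sizes_model) / sizeof(pmb_sizes_model[0]);
		unsigned long vaddr = 0xa0000000UL;	/* illustrative start address */
		unsigned long size  = 192UL << 20;	/* bolt a 192MB region */
		int i;

		do {
			for (i = 0; i < nr; i++) {
				if (size < pmb_sizes_model[i])
					continue;

				printf("entry: vaddr %#lx, size %luMB\n",
				       vaddr, pmb_sizes_model[i] >> 20);

				vaddr += pmb_sizes_model[i];
				size  -= pmb_sizes_model[i];

				/* Re-try the same size before falling to smaller ones. */
				i--;
			}
		} while (size >= SZ_16M);	/* remainders under 16MB are not bolted */

		return 0;
	}

For a 192MB region this emits one 128MB entry followed by one 64MB entry; any
remainder smaller than 16MB, the smallest PMB entry size, is simply left
unbolted, exactly as the size >= SZ_16M termination condition implies.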