author    Paul Mundt <lethal@linux-sh.org>  2010-03-04 02:44:20 -0500
committer Paul Mundt <lethal@linux-sh.org>  2010-03-04 02:44:20 -0500
commit    281983d6ff2674ca2e4868de628c65809d84fa4c (patch)
tree      abdf15ec83c5086220aff0c92d2112f8e05c3041 /arch/sh
parent    09e1172317d1038918c5a139ba31155610f802b5 (diff)
sh: fix up MMU reset with variable PMB mapping sizes.
Presently we run into issues with the MMU resetting the CPU when variable sized mappings are employed. This takes a slightly more aggressive approach to keeping the TLB and cache state sane before establishing the mappings, in order to cut down on races observed on SMP configurations.

At the same time, we bump the VMA range up to the 0xb000...0xc000 range, as there still seems to be some undocumented behaviour in setting up variable mappings in the 0xa000...0xb000 range, resulting in reset by the TLB.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
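For readers who prefer not to walk the hunks below, the core of the change to __set_pmb_entry() can be condensed as follows. This is an illustrative sketch reassembled from the diff, assuming the surrounding pmb.c context (struct pmb_entry, mk_pmb_addr()/mk_pmb_data(), jump_to_uncached()/back_to_cached(), and the PMB_V flag); it is not a drop-in replacement for the file.

/*
 * Sketch of the reworked __set_pmb_entry(): the PMB address and data
 * registers are now written from uncached space, with the address
 * (VPN) register set before the data (PPN/flags) register.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}

In the same spirit, pmb_bolt_mapping() gains a flush_tlb_kernel_range() over the range before the entries are programmed and a flush_cache_vmap() over the bolted range afterwards, which is where the "keeping the TLB and cache state sane" part of the description comes from.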
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/mm/pmb.c  |  37
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 75b8861ec624..30035caeb73a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -24,6 +24,7 @@
 #include <linux/io.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
 #include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -292,9 +293,18 @@ static void pmb_free(struct pmb_entry *pmbe)
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
+        unsigned long addr, data;
+
+        addr = mk_pmb_addr(pmbe->entry);
+        data = mk_pmb_data(pmbe->entry);
+
+        jump_to_uncached();
+
         /* Set V-bit */
-        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
-        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
+        __raw_writel(pmbe->vpn | PMB_V, addr);
+        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
+
+        back_to_cached();
 }
 
 static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -326,6 +336,7 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                      unsigned long size, pgprot_t prot)
 {
         struct pmb_entry *pmbp, *pmbe;
+        unsigned long orig_addr, orig_size;
         unsigned long flags, pmb_flags;
         int i, mapped;
 
@@ -334,6 +345,11 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
         if (pmb_mapping_exists(vaddr, phys, size))
                 return 0;
 
+        orig_addr = vaddr;
+        orig_size = size;
+
+        flush_tlb_kernel_range(vaddr, vaddr + size);
+
         pmb_flags = pgprot_to_pmb_flags(prot);
         pmbp = NULL;
 
@@ -383,13 +399,15 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                 }
         } while (size >= SZ_16M);
 
+        flush_cache_vmap(orig_addr, orig_addr + orig_size);
+
         return 0;
 }
 
 void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                                pgprot_t prot, void *caller)
 {
-        unsigned long orig_addr, vaddr;
+        unsigned long vaddr;
         phys_addr_t offset, last_addr;
         phys_addr_t align_mask;
         unsigned long aligned;
@@ -417,19 +435,24 @@ void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
         phys &= align_mask;
         aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
 
-        area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+        /*
+         * XXX: This should really start from uncached_end, but this
+         * causes the MMU to reset, so for now we restrict it to the
+         * 0xb000...0xc000 range.
+         */
+        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                     P3SEG, caller);
         if (!area)
                 return NULL;
 
         area->phys_addr = phys;
-        orig_addr = vaddr = (unsigned long)area->addr;
+        vaddr = (unsigned long)area->addr;
 
         ret = pmb_bolt_mapping(vaddr, phys, size, prot);
         if (unlikely(ret != 0))
                 return ERR_PTR(ret);
 
-        return (void __iomem *)(offset + (char *)orig_addr);
+        return (void __iomem *)(offset + (char *)vaddr);
 }
 
 int pmb_unmap(void __iomem *addr)
@@ -477,6 +500,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
          */
         __clear_pmb_entry(pmbe);
 
+        flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
+
         pmbe = pmblink->link;
 
         pmb_free(pmblink);