author    Paul Mundt <lethal@linux-sh.org>  2010-02-16 23:23:00 -0500
committer Paul Mundt <lethal@linux-sh.org>  2010-02-16 23:23:00 -0500
commit    7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree      2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh/mm
parent    49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take an unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem, since the cache attribute
bits we care about are all in the lower 32 bits, but we make the
conversion anyway just to be safe. The store queue remapping, on the
other hand, depends on the extended protection bits to enable userspace
access to the mappings.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
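
The truncation is easiest to see outside the kernel. Below is a minimal,
self-contained userspace C sketch (not part of the patch): the pgprot_sketch
type, the flag names, and the exact bit positions are assumptions made for
illustration, with uint32_t standing in for the 32-bit unsigned long used on
sh32 and uint64_t standing in for the 64-bit pgprot value on SH-X2.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Stand-in for the kernel's pgprot_t when 64-bit PTEs are enabled (assumption). */
typedef struct { uint64_t pgprot; } pgprot_sketch;

/* Hypothetical flag layout: an "extended" user-access bit above bit 31. */
#define SKETCH_PAGE_EXT_USER	(1ULL << 32)
#define SKETCH_PAGE_CACHABLE	(1ULL << 5)

/* Old-style interface: flags arrive as a 32-bit value (sh32 "unsigned long"). */
static void remap_old(uint32_t flags)
{
	printf("old API sees: 0x%08" PRIx32 " (extended bits lost)\n", flags);
}

/* Fixed-style interface: the pgprot itself is passed and widened internally. */
static void remap_new(pgprot_sketch prot)
{
	uint64_t flags = prot.pgprot;
	printf("new API sees: 0x%016" PRIx64 "\n", flags);
}

int main(void)
{
	pgprot_sketch prot = { SKETCH_PAGE_EXT_USER | SKETCH_PAGE_CACHABLE };

	/* Passing the value through a 32-bit parameter silently drops bit 32. */
	remap_old((uint32_t)prot.pgprot);
	remap_new(prot);
	return 0;
}

This mirrors the shape of the fix below: pmb_remap() now takes the pgprot_t
itself and reads the full value with pgprot_val() into a u64 before testing
individual flag bits.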
Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/ioremap.c | 2 +-
 arch/sh/mm/pmb.c     | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da855..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped;
 
-		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index f822f83418e4..509a444a30ab 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -24,6 +24,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -166,12 +167,15 @@ static struct {
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, unsigned long flags)
+	       unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
 	long err;
+	u64 flags;
+
+	flags = pgprot_val(prot);
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {