author    Paul Mundt <lethal@linux-sh.org>  2010-02-16 23:23:00 -0500
committer Paul Mundt <lethal@linux-sh.org>  2010-02-16 23:23:00 -0500
commit    7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree      2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh
parent    49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32-bits, but we do it just
to be safe. The store queue remapping on the other hand depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
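To see the truncation concretely, here is a minimal userspace sketch,
assuming a 64-bit pgprot and a 32-bit unsigned long as on SH-X2 with
X2TLB; the bit positions (_PAGE_EXT_USER at bit 33, _PAGE_CACHABLE at
bit 5) are hypothetical stand-ins, not the real SH-X2 layout:

/*
 * Minimal sketch of the truncation this patch fixes; the extended
 * protection bits live above bit 31 and cannot survive a pass
 * through a 32-bit unsigned long.
 */
#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t pgprot; } pgprot_t;	/* as with CONFIG_X2TLB */
#define pgprot_val(x)	((x).pgprot)

#define _PAGE_CACHABLE	((uint64_t)1 << 5)	/* hypothetical low bit */
#define _PAGE_EXT_USER	((uint64_t)1 << 33)	/* hypothetical extended bit */

int main(void)
{
	pgprot_t prot = { _PAGE_CACHABLE | _PAGE_EXT_USER };

	/* old API: the pgprot squeezed through a 32-bit unsigned long */
	uint32_t flags = (uint32_t)pgprot_val(prot);

	assert(flags & _PAGE_CACHABLE);			/* low bits survive */
	assert(!((uint64_t)flags & _PAGE_EXT_USER));	/* bit 33 is gone */

	/* new API: pass the pgprot_t itself, nothing is cut off */
	assert(pgprot_val(prot) & _PAGE_EXT_USER);
	return 0;
}

The patch below therefore moves pgprot_t itself across the pmb_remap()
and sq_remap() boundaries instead of an unsigned long.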
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/mmu.h         |  5 +++--
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sq.h  |  3 ++-
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c       | 13 ++++++-------
-rw-r--r--  arch/sh/mm/ioremap.c              |  2 +-
-rw-r--r--  arch/sh/mm/pmb.c                  |  6 +++++-
5 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb55002..151bc922701b 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@ struct pmb_entry {
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h
index 586d6491816a..74716ba2dc3c 100644
--- a/arch/sh/include/cpu-sh4/cpu/sq.h
+++ b/arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@
 #define __ASM_CPU_SH4_SQ_H
 
 #include <asm/addrspace.h>
+#include <asm/page.h>
 
 /*
  * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@@ -28,7 +29,7 @@
 
 /* arch/sh/kernel/cpu/sh4/sq.c */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags);
+		       const char *name, pgprot_t prot);
 void sq_unmap(unsigned long vaddr);
 void sq_flush_range(unsigned long start, unsigned int len);
 
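For illustration, a hypothetical caller of the new prototype could look
like the following (the function and mapping name are made up; on SH
the declaration comes in via <cpu/sq.h>). Note the design shift visible
in the sq.c hunks below: the cache attributes now travel inside the
pgprot_t, where the old API OR'd PAGE_KERNEL_NOCACHE in behind the
caller's back:

/* Hypothetical caller, sketched against the new prototype */
#include <cpu/sq.h>
#include <asm/page.h>

static unsigned long example_sq_map(unsigned long phys, unsigned int len)
{
	/* the full pgprot, extended user bits included, arrives intact */
	return sq_remap(phys, len, "example", PAGE_SHARED);
}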
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b00..fc065f9da6e5 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
 	spin_unlock_irq(&sq_mapping_lock);
 }
 
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
 
 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  * @phys: Physical address of mapping.
  * @size: Length of mapping.
  * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
  *
  * Remaps the physical address @phys through the next available store queue
  * address of @size length. @name is logged at boot time as well as through
  * the sysfs interface.
  */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
 
-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
 		return -EIO;
 
 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace",
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da855..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped;
 
-		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index f822f83418e4..509a444a30ab 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -24,6 +24,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -166,12 +167,15 @@ static struct {
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, unsigned long flags)
+	       unsigned long size, pgprot_t prot)
 {
 	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
 	long err;
+	u64 flags;
+
+	flags = pgprot_val(prot);
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
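To make the widening pattern at the tail of this last hunk concrete,
here is a standalone, simplified sketch of the conversion; the types
and bit values are stand-ins rather than the real arch/sh definitions,
and the kernel's actual conversion handles further attributes beyond
the cacheable bit:

#include <stdint.h>
#include <stdio.h>

/* Stand-in types; the real ones live in the arch/sh headers */
typedef struct { uint64_t pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)

#define _PAGE_CACHABLE	0x020ULL	/* hypothetical pgprot bit */
#define PMB_C		0x008		/* hypothetical PMB cacheable bit */

/* Simplified version of the pgprot-to-PMB conversion above */
static int pmb_flags_from_prot(pgprot_t prot)
{
	uint64_t flags = pgprot_val(prot);	/* widen once, up front */
	int pmb_flags = 0;

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;

	return pmb_flags;
}

int main(void)
{
	pgprot_t cached = { _PAGE_CACHABLE };
	pgprot_t uncached = { 0 };

	printf("cached -> %#x, uncached -> %#x\n",
	       pmb_flags_from_prot(cached), pmb_flags_from_prot(uncached));
	return 0;
}

Unpacking into a u64 up front means the bit tests stay well-defined
whether pgprot is 32-bit or, as with X2TLB, 64-bit.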