author	Paul Mundt <lethal@linux-sh.org>	2010-02-16 23:23:00 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-02-16 23:23:00 -0500
commit	7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree	2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh/kernel
parent	49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for their
pgprot flags, which cuts off the extended protection bits. In the case of the
PMB this isn't really a problem since the cache attribute bits that we care
about are all in the lower 32-bits, but we do it just to be safe. The store
queue remapping on the other hand depends on the extended prot bits for
enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
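To make the truncation concrete, here is a minimal standalone sketch. The bit
values are hypothetical stand-ins, not the real SH-X2 layout, and the bug only
manifests where unsigned long is 32 bits wide, as it is on sh:

/*
 * Sketch of the truncation described above. _PAGE_EXT_USER is an
 * illustrative stand-in for an extended protection bit; it is not the
 * real SH-X2 bit layout. Assumes a 32-bit unsigned long, as on sh.
 */
#include <stdio.h>

typedef struct { unsigned long long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)

#define _PAGE_CACHABLE	0x0000000000000008ULL	/* low word: survives    */
#define _PAGE_EXT_USER	0x0000000100000000ULL	/* high word: truncated  */

/* Pre-patch style: prot squeezed through an unsigned long parameter. */
static void old_api(unsigned long flags)
{
	/* With a 32-bit unsigned long, _PAGE_EXT_USER is already gone. */
	printf("seen by callee: %#lx\n", flags);
}

int main(void)
{
	pgprot_t prot = { _PAGE_CACHABLE | _PAGE_EXT_USER };

	old_api(pgprot_val(prot));	/* implicit 64->32 bit conversion */
	return 0;
}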
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--	arch/sh/kernel/cpu/sh4/sq.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 97aea9d69b00..fc065f9da6e5 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
 	spin_unlock_irq(&sq_mapping_lock);
 }
 
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
 {
 #if defined(CONFIG_MMU)
 	struct vm_struct *vma;
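The signature change works because pgprot_t is a struct wrapper whose width
tracks the PTE size. A rough sketch of the shape involved, an assumption based
on the commit message rather than a copy of arch/sh/include/asm/page.h:

/*
 * Rough shape of the types (an assumption; see arch/sh/include/asm/page.h
 * for the real definitions). With the SH-X2 extended TLB the protection
 * value is 64 bits wide, so only a pgprot_t parameter carries the upper
 * protection word that a 32-bit unsigned long cuts off.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long long pgprot; } pgprot_t;
#else
typedef struct { unsigned long pgprot; } pgprot_t;
#endif

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })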
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
 
 	if (ioremap_page_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
-			       vma->phys_addr, __pgprot(flags))) {
+			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
 		return -EAGAIN;
 	}
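Note that the old code rebuilt the pgprot with __pgprot(flags) from an already
truncated unsigned long; passing the caller's pgprot_t through untouched is
what lets the extended bits survive all the way into ioremap_page_range().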
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  * @phys: Physical address of mapping.
  * @size: Length of mapping.
  * @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
  *
  * Remaps the physical address @phys through the next available store queue
  * address of @size length. @name is logged at boot time as well as through
  * the sysfs interface.
  */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags)
+		       const char *name, pgprot_t prot)
 {
 	struct sq_mapping *map;
 	unsigned long end;
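A hedged example of the post-patch calling convention. The sq_remap()
signature comes from this diff; the wrapper function and the header path are
illustrative assumptions:

/*
 * Hypothetical caller showing the new convention: a full pgprot_t is
 * passed instead of raw unsigned long flags. The <cpu/sq.h> path is an
 * assumption about where sq_remap() is declared.
 */
#include <linux/mm.h>
#include <cpu/sq.h>

static unsigned long map_fb_through_sq(unsigned long phys, unsigned int size)
{
	/* Old callers passed pgprot_val(PAGE_SHARED) as an unsigned long,
	 * losing the extended bits; now the pgprot_t goes through whole. */
	return sq_remap(phys, size, "example-fb", PAGE_SHARED);
}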
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 
 	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
 
-	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	ret = __sq_remap(map, prot);
 	if (unlikely(ret != 0))
 		goto out;
 
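With this hunk, sq_remap() no longer folds PAGE_KERNEL_NOCACHE into the
mapping behind the caller's back; the supplied prot is used as-is, so callers
are now responsible for passing the complete set of protection and cache
attribute bits.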
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
 		return -EIO;
 
 	if (likely(len)) {
-		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
-				   pgprot_val(PAGE_SHARED));
+		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
 		if (ret < 0)
 			return ret;
 	} else
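This last hunk is the path the commit message is really about: the sysfs
store now hands PAGE_SHARED through as a full pgprot_t, so the extended
protection bits that enable userspace access to the store queue mapping are
no longer cut off on SH-X2.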