about summary refs log tree commit diff stats
path: root/arch/sh/include
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2010-02-16 23:23:00 -0500
committerPaul Mundt <lethal@linux-sh.org>2010-02-16 23:23:00 -0500
commit7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh/include
parent49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for their pgprot flags, which cuts off the extended protection bits. In the case of the PMB this isn't really a problem since the cache attribute bits that we care about are all in the lower 32-bits, but we do it just to be safe. The store queue remapping on the other hand depends on the extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/include')
-rw-r--r--arch/sh/include/asm/mmu.h5
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sq.h3
2 files changed, 5 insertions, 3 deletions
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb55002..151bc922701b 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@ struct pmb_entry {
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }
diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h
index 586d6491816a..74716ba2dc3c 100644
--- a/arch/sh/include/cpu-sh4/cpu/sq.h
+++ b/arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@
 #define __ASM_CPU_SH4_SQ_H
 
 #include <asm/addrspace.h>
+#include <asm/page.h>
 
 /*
  * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@@ -28,7 +29,7 @@
 
 /* arch/sh/kernel/cpu/sh4/sq.c */
 unsigned long sq_remap(unsigned long phys, unsigned int size,
-		       const char *name, unsigned long flags);
+		       const char *name, pgprot_t prot);
 void sq_unmap(unsigned long vaddr);
 void sq_flush_range(unsigned long start, unsigned int len);
 