author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-24 00:27:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-24 13:47:15 -0400
commit	a1f242ff460e4b50a045fa237c3c56cce9eabf83 (patch)
tree	657766b55251042b38967422dc9c3ea893b98747
parent	7ae8ed5053a39082d224a3f48409e016baca9c16 (diff)
powerpc ioremap_prot
This adds ioremap_prot() and pte_pgprot(), so that one can extract the
protection bits from a PTE and pass them to ioremap_prot() (in order to
support ptrace of VM_IO | VM_PFNMAP mappings, as per Rik's patch).

This moves a couple of flag checks around in the ioremap implementations
of arch/powerpc.  A side effect is that non-cacheable, non-guarded
mappings are now possible on ppc32, which previously always had
_PAGE_GUARDED set whenever _PAGE_NO_CACHE was.  (The standard ioremap
will still set _PAGE_GUARDED, but ioremap_prot is capable of creating
such a non-guarded mapping.)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/powerpc/Kconfig			 1
-rw-r--r--	arch/powerpc/mm/pgtable_32.c		22
-rw-r--r--	arch/powerpc/mm/pgtable_64.c		16
-rw-r--r--	include/asm-powerpc/io.h		 5
-rw-r--r--	include/asm-powerpc/pgtable-4k.h	 3
-rw-r--r--	include/asm-powerpc/pgtable-ppc32.h	16
-rw-r--r--	include/asm-powerpc/pgtable-ppc64.h	 8
7 files changed, 64 insertions, 7 deletions
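Before the hunks, a minimal sketch of how the two new helpers are meant to be used together. This is not part of the commit; the caller, its locking, and the offset handling are hypothetical, loosely modelled on the generic ptrace-of-device-memory work this patch enables:

/*
 * Hypothetical caller, not from this commit: given the PTE backing a
 * VM_IO | VM_PFNMAP user mapping, re-map the page in kernel space with
 * identical protection/cache attributes and copy data out of it.
 */
static int read_io_page(pte_t pte, unsigned long offset, void *buf, int len)
{
	phys_addr_t phys = (phys_addr_t)pte_pfn(pte) << PAGE_SHIFT;
	unsigned long prot = pte_pgprot(pte);	/* only PAGE_PROT_BITS survive */
	void __iomem *maddr;

	maddr = ioremap_prot(phys, PAGE_SIZE, prot);
	if (maddr == NULL)
		return -ENOMEM;
	memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);
	return len;
}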
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4d7e2ba10bae..a487671c282f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -111,6 +111,7 @@ config PPC
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE
 	select HAVE_IDE
+	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_ARCH_KGDB
 	select HAVE_KRETPROBES
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c7584072dfcc..2001abdb1912 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -145,13 +145,20 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
+	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
+
 	return __ioremap(addr, size, flags);
 }
 EXPORT_SYMBOL(ioremap_flags);
@@ -163,6 +170,14 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	phys_addr_t p;
 	int err;
 
+	/* Make sure we have the base flags */
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= _PAGE_KERNEL;
+
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
 	/*
 	 * Choose an address to map it to.
 	 * Once the vmalloc system is running, we use it.
@@ -219,11 +234,6 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 		v = (ioremap_bot -= size);
 	}
 
-	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= _PAGE_KERNEL;
-	if (flags & _PAGE_NO_CACHE)
-		flags |= _PAGE_GUARDED;
-
 	/*
 	 * Should check if it is a candidate for a BAT mapping
 	 */
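To make the effect of the relocated checks in ioremap_flags() concrete, here is a throwaway, userspace-compilable restatement of the flag sanitization. The bit values are illustrative stand-ins, not the real ppc32 PTE encodings:

#include <assert.h>

/* Stand-in values only; real encodings vary per ppc32 core family */
#define _PAGE_RW       0x001
#define _PAGE_DIRTY    0x002
#define _PAGE_HWWRITE  0x004
#define _PAGE_USER     0x008
#define _PAGE_EXEC     0x010
#define _PAGE_HWEXEC   0x020

static unsigned long sanitize_ioremap_flags(unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
	/* user and exec permissions must never leak into an ioremap */
	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
	return flags;
}

int main(void)
{
	unsigned long f = sanitize_ioremap_flags(_PAGE_RW | _PAGE_USER);

	assert(f & _PAGE_DIRTY);	/* dirty forced on for RW */
	assert(f & _PAGE_HWWRITE);
	assert(!(f & _PAGE_USER));	/* user bit stripped */
	return 0;
}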
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3ef0ad2f9ca0..365e61ae5dbc 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -107,9 +107,18 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 {
 	unsigned long i;
 
+	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
 
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
+	/* We don't support the 4K PFN hack with ioremap */
+	if (flags & _PAGE_4K_PFN)
+		return NULL;
+
 	WARN_ON(pa & ~PAGE_MASK);
 	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
 	WARN_ON(size & ~PAGE_MASK);
@@ -190,6 +199,13 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 			     unsigned long flags)
 {
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+
 	if (ppc_md.ioremap)
 		return ppc_md.ioremap(addr, size, flags);
 	return __ioremap(addr, size, flags);
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 8b627823f5f9..77c7fa025e65 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
  * and can be hooked by the platform via ppc_md
  *
  * * ioremap_flags allows to specify the page flags as an argument and can
- *   also be hooked by the platform via ppc_md
+ *   also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ *   same thing as ioremap_flags.
  *
  * * ioremap_nocache is identical to ioremap
  *
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
 extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
 				   unsigned long flags);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot)	ioremap_flags((addr), (size), (prot))
+
 extern void iounmap(volatile void __iomem *addr);
 
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
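Since ioremap_prot() is defined as a straight alias for ioremap_flags(), a caller can hand it any flag mask the architecture accepts. An illustrative driver-style use (the function, its name, and the flag choice are made up for the example, not taken from this commit):

/* Illustrative only: map a device register window uncached and guarded,
 * the same attributes plain ioremap() now uses on ppc32. */
static void __iomem *map_device_regs(phys_addr_t dev_phys)
{
	return ioremap_prot(dev_phys, PAGE_SIZE,
			    _PAGE_NO_CACHE | _PAGE_GUARDED);
}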
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index fd2090dc1dce..c9601dfb4a1e 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
 			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
 
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN	0
+
 /* PAGE_MASK gives the right answer below, but only by accident */
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
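Defining _PAGE_4K_PFN to 0 on 4K-page configurations is the usual define-to-zero idiom: the new `if (flags & _PAGE_4K_PFN)` test in __ioremap_at() can then compile unconditionally, and the compiler folds the branch away wherever the bit does not exist. A standalone sketch of the pattern (illustrative, not kernel code):

#include <stdio.h>

#define _PAGE_4K_PFN 0	/* as on 4K-page configs; 64K-page configs define a real bit */

static const char *classify(unsigned long flags)
{
	if (flags & _PAGE_4K_PFN)	/* constant-false here: branch compiles away */
		return "rejected";
	return "mapped";
}

int main(void)
{
	printf("%s\n", classify(0xffffffffUL));	/* always "mapped" on 4K pages */
	return 0;
}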
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb75..bdbab72f3ebc 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+				 _PAGE_USER | _PAGE_ACCESSED | \
+				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+				 _PAGE_EXEC | _PAGE_HWEXEC)
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 		pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ab98a9c80b28..ba8000352b9a 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+				 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+				 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
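Note what PAGE_PROT_BITS deliberately leaves out: bookkeeping bits such as _PAGE_HASHPTE or _PAGE_BUSY never make it through pte_pgprot(), so the result is safe to feed straight back into ioremap_prot(). A standalone illustration of the masking, with stand-in bit values rather than the real ppc64 encodings:

#include <assert.h>

/* Stand-in values, not the real ppc64 encodings */
#define _PAGE_NO_CACHE  0x01	/* protection attribute: kept */
#define _PAGE_GUARDED   0x02	/* protection attribute: kept */
#define _PAGE_HASHPTE   0x04	/* hash-table bookkeeping: stripped */
#define PAGE_PROT_BITS  (_PAGE_NO_CACHE | _PAGE_GUARDED)

static unsigned long pte_pgprot_sketch(unsigned long pte_val)
{
	return pte_val & PAGE_PROT_BITS;	/* keep only re-usable attributes */
}

int main(void)
{
	unsigned long pte = _PAGE_NO_CACHE | _PAGE_HASHPTE;

	assert(pte_pgprot_sketch(pte) == _PAGE_NO_CACHE);
	return 0;
}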