author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-07-24 00:27:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2008-07-24 13:47:15 -0400
commit     a1f242ff460e4b50a045fa237c3c56cce9eabf83
tree       657766b55251042b38967422dc9c3ea893b98747 /include/asm-powerpc/pgtable-ppc32.h
parent     7ae8ed5053a39082d224a3f48409e016baca9c16
powerpc ioremap_prot
This adds ioremap_prot() and pte_pgprot() so that one can extract the
protection bits from a PTE and pass them to ioremap_prot() (in order to
support ptrace of VM_IO | VM_PFNMAP areas, as per Rik's patch).

This moves a couple of flag checks around in the ioremap implementations
of arch/powerpc. A side effect is that ppc32 can now create non-cacheable,
non-guarded mappings, whereas previously _PAGE_GUARDED was always set
whenever _PAGE_NO_CACHE was.

(Standard ioremap() will still set _PAGE_GUARDED, but ioremap_prot() will
be capable of creating such a non-guarded mapping.)
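For illustration, the intended call pattern looks roughly like the sketch
below. This is a simplified, hypothetical example, not the actual
mm/memory.c code from Rik's patch: the helper name access_io_page(), its
signature, and the single-page assumption are invented for this sketch;
only pte_pgprot(), ioremap_prot(), iounmap(), memcpy_toio() and
memcpy_fromio() are the real interfaces being exercised.

#include <linux/mm.h>	/* pte_t, pte_val(), pte_pgprot() */
#include <linux/io.h>	/* ioremap_prot(), iounmap(), memcpy_toio/fromio() */

/*
 * Hypothetical helper: given the PTE covering an IO page in a
 * VM_IO | VM_PFNMAP vma, reuse its protection bits for a temporary
 * kernel mapping so ptrace can access the page with the same cache
 * attributes as the user mapping. Assumes the access stays within
 * one page.
 */
static long access_io_page(pte_t pte, phys_addr_t phys, unsigned long offset,
			   void *buf, unsigned long len, int write)
{
	/* Extract just the protection/attribute bits of the PTE. */
	unsigned long prot = pte_pgprot(pte);
	void __iomem *vaddr;

	/* Unlike plain ioremap(), which forces _PAGE_NO_CACHE | _PAGE_GUARDED,
	 * ioremap_prot() honours the bits supplied by the caller. */
	vaddr = ioremap_prot(phys, PAGE_SIZE, prot);
	if (!vaddr)
		return -ENOMEM;

	if (write)
		memcpy_toio(vaddr + offset, buf, len);
	else
		memcpy_fromio(buf, vaddr + offset, len);

	iounmap(vaddr);
	return len;
}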
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-powerpc/pgtable-ppc32.h')
 include/asm-powerpc/pgtable-ppc32.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb75..bdbab72f3ebc 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+				 _PAGE_USER | _PAGE_ACCESSED | \
+				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+				 _PAGE_EXEC | _PAGE_HWEXEC)
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
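A schematic view of what the new helper keeps and drops (a sketch against
the definitions above; pte_prot_of() is an invented name, not a kernel
interface):

/* pte_pgprot() masks the raw PTE word with PAGE_PROT_BITS: the cache,
 * endian and permission attributes survive, while the page frame number
 * and status bits such as _PAGE_PRESENT are dropped. */
static inline unsigned long pte_prot_of(pte_t pte)
{
	unsigned long prot = pte_pgprot(pte);

	/* 'prot' can safely be reused for a mapping of a different physical
	 * address (e.g. via ioremap_prot()); the raw pte_val(pte) cannot,
	 * because it still carries the original PFN and present bit. */
	return prot;
}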