author     Dave Kleikamp <shaggy@linux.vnet.ibm.com>          2008-07-07 10:28:54 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-09 02:30:45 -0400
commit     ef3d3246a0d06be622867d21af25f997aeeb105f
tree       9f0ae1913e0e637ec3aa104cc5e81557e5661b3c  /include/asm-powerpc/mman.h
parent     379070491e1e744a59e69e5bcf3765012d15ecb4
powerpc/mm: Add Strong Access Ordering support
Allow an application to enable Strong Access Ordering (SAO) on specific pages of
memory on POWER7 hardware. Currently, Power has a weaker memory model than
x86. Implementing a stronger memory model allows an emulator to translate x86
code into Power code more efficiently, resulting in faster code execution.

On POWER7 hardware, storing 0b1110 in the WIMG bits of the HPTE enables
strong access ordering mode for the memory page. This patchset allows a
user to specify which pages are enabled this way by passing a new protection
bit through mmap() and mprotect(). I have defined PROT_SAO to be 0x10.
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
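
For illustration only (not part of this commit): a minimal user-space sketch of how a
program might request SAO once this series is applied. The PROT_SAO value (0x10) is
taken from this series; the expectation that mprotect() fails with EINVAL on hardware
without CPU_FTR_SAO follows from the arch_validate_prot() hook added below.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10   /* powerpc-specific; value taken from this series */
#endif

int main(void)
{
        size_t len = 4096;

        /* Plain anonymous mapping first; SAO is requested afterwards. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Ask for strong access ordering on the page. */
        if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO) == -1)
                fprintf(stderr, "PROT_SAO not available: %s\n",
                        strerror(errno));
        else
                printf("page mapped with strong access ordering\n");

        munmap(p, len);
        return 0;
}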
Diffstat (limited to 'include/asm-powerpc/mman.h')
-rw-r--r--   include/asm-powerpc/mman.h | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index 0c46bf2c7d5f..f8a32e20ba04 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -1,7 +1,9 @@
 #ifndef _ASM_POWERPC_MMAN_H
 #define _ASM_POWERPC_MMAN_H
 
+#include <asm/cputable.h>
 #include <asm-generic/mman.h>
+#include <linux/mm.h>
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -26,4 +28,32 @@
 #define MAP_POPULATE    0x8000          /* populate (prefault) pagetables */
 #define MAP_NONBLOCK    0x10000         /* do not block on IO */
 
+#ifdef CONFIG_PPC64
+/*
+ * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
+ * here.  How important is the optimization?
+ */
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+{
+       return (prot & PROT_SAO) ? VM_SAO : 0;
+}
+#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+{
+       return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
+}
+#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
+
+static inline int arch_validate_prot(unsigned long prot)
+{
+       if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+               return 0;
+       if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
+               return 0;
+       return 1;
+}
+#define arch_validate_prot(prot) arch_validate_prot(prot)
+
+#endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_MMAN_H */
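
For context (again, not part of the commit): the define-it-to-itself pattern above exists
because include/linux/mman.h supplies no-op defaults for these hooks under #ifndef, and
its generic calc_vm_prot_bits() ORs in arch_calc_vm_prot_bits(prot) when translating
mmap()/mprotect() protection bits into vma flags, while mprotect() consults
arch_validate_prot() before accepting a request. The sketch below is a self-contained
user-space model of that flow, not kernel source; all flag values except PROT_SAO (0x10)
are illustrative, and PROT_SEM is omitted for brevity.

#include <stdio.h>

#define PROT_READ       0x1
#define PROT_WRITE      0x2
#define PROT_EXEC       0x4
#define PROT_SAO        0x10            /* from this series */

#define VM_READ         0x1
#define VM_WRITE        0x2
#define VM_EXEC         0x4
#define VM_SAO          0x20000000UL    /* illustrative placement */

/* prot flag -> vm flag if set, mirroring _calc_vm_trans() in linux/mman.h */
#define calc_vm_trans(x, bit, vmbit)    (((x) & (bit)) ? (vmbit) : 0)

static int cpu_has_sao = 1;     /* stand-in for cpu_has_feature(CPU_FTR_SAO) */

/* Models the generic calc_vm_prot_bits() plus the powerpc override above. */
static unsigned long calc_vm_prot_bits(unsigned long prot)
{
        return calc_vm_trans(prot, PROT_READ,  VM_READ)  |
               calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
               calc_vm_trans(prot, PROT_EXEC,  VM_EXEC)  |
               ((prot & PROT_SAO) ? VM_SAO : 0);  /* arch_calc_vm_prot_bits() */
}

/* Models the powerpc arch_validate_prot() added above (PROT_SEM omitted). */
static int validate_prot(unsigned long prot)
{
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SAO))
                return 0;
        if ((prot & PROT_SAO) && !cpu_has_sao)
                return 0;
        return 1;
}

int main(void)
{
        unsigned long prot = PROT_READ | PROT_WRITE | PROT_SAO;

        if (!validate_prot(prot)) {
                puts("rejected: SAO not supported on this CPU");
                return 1;
        }
        printf("vm_flags = 0x%lx\n", calc_vm_prot_bits(prot));
        return 0;
}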