author		Catalin Marinas <catalin.marinas@arm.com>	2010-09-13 11:03:21 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-11-04 11:44:31 -0400
commit		247055aa21ffef1c49dd64710d5e94c2aee19b58 (patch)
tree		e9e026b96597d080de4c16bb88c17b0495c61904 /arch/arm/mm/mmu.c
parent		ff8b16d7e15a8ba2a6086645614a483e048e3fbf (diff)
ARM: 6384/1: Remove the domain switching on ARMv6k/v7 CPUs
This patch removes the domain switching functionality via the set_fs and
__switch_to functions on cores that have a TLS register.
Currently, the ioremap and vmalloc areas share the same level 1 page
tables and therefore have the same domain (DOMAIN_KERNEL). When the
kernel domain is modified from Client to Manager (via __set_fs or in the
__switch_to function), the XN (eXecute Never) bit is overridden and
newer CPUs can speculatively prefetch the ioremap'ed memory.
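For context, a minimal sketch of the pre-patch pattern described above. modify_domain(), DOMAIN_KERNEL, DOMAIN_MANAGER and DOMAIN_CLIENT are the existing asm/domain.h helpers; the function name and body are illustrative, not a quote of the removed code.

#include <asm/domain.h>       /* modify_domain(), DOMAIN_* constants */
#include <asm/thread_info.h>  /* current_thread_info() */
#include <asm/uaccess.h>      /* mm_segment_t, KERNEL_DS */

/* Illustrative sketch only: raising addr_limit to KERNEL_DS used to flip
 * DOMAIN_KERNEL to Manager as well, making the CPU ignore the page-table
 * permission bits -- and, on newer cores, the XN bit on ioremap'ed memory. */
static inline void sketch_set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL,
		      fs == KERNEL_DS ? DOMAIN_MANAGER : DOMAIN_CLIENT);
}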
Linux performs the kernel domain switching to allow user-specific
functions (copy_to/from_user, get/put_user etc.) to access kernel
memory. In order for these functions to work with the kernel domain set
to Client, the patch modifies the LDRT/STRT and related instructions to
the LDR/STR ones.
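A hedged sketch of what that substitution amounts to at the instruction level; the __user_load macro name is hypothetical and only stands in for the get_user()-style accessors the patch touches.

/* With domain switching (CPU_USE_DOMAINS) the unprivileged LDRT is needed so
 * the access is checked as if it came from user mode; with the kernel domain
 * permanently set to Client, a plain LDR is already constrained by the
 * page-table permission bits. */
#ifdef CONFIG_CPU_USE_DOMAINS
#define __user_load(val, ptr) \
	__asm__("ldrt	%0, [%1]" : "=r" (val) : "r" (ptr))
#else
#define __user_load(val, ptr) \
	__asm__("ldr	%0, [%1]" : "=r" (val) : "r" (ptr))
#endif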
The user pages' access rights are also changed so that the kernel has
read-only rather than read/write access, which keeps the copy-on-write
mechanism working. CPU_USE_DOMAINS is disabled only when the hardware has a
TLS register (CPU_32v6K is defined), since without one the TLS value must be
written into the high vectors page, and that is not possible with the kernel
domain set to Client.
The user addresses passed to the kernel are checked by the access_ok()
function so that they do not point to the kernel space.
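A minimal sketch of that caller-side guard, assuming the 2010-era access_ok(type, addr, size) signature; the function name is illustrative.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* With the unprivileged STRT variants gone, access_ok() is what keeps a
 * user-supplied pointer from aliasing kernel memory before the plain
 * LDR/STR based copy runs. */
static long sketch_put_to_user(void __user *dst, const void *src, size_t n)
{
	if (!access_ok(VERIFY_WRITE, dst, n))
		return -EFAULT;
	return __copy_to_user(dst, src, n);	/* returns bytes not copied */
}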
Tested-by: Anton Vorontsov <cbouatmailru@gmail.com>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 72ad3e1f56cf..79c01f540cbe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -914,12 +915,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -959,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
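The hunks above drop the function-local vectors pointer in favour of the externally visible vectors_page, which is what the newly added #include <asm/traps.h> supports. A hedged sketch of the pieces this file now relies on elsewhere in the patch (the declaration and definition sites are assumptions, since this diffstat is limited to arch/arm/mm/mmu.c):

/* Assumed shared declaration, e.g. in arch/arm/include/asm/traps.h: */
extern void *vectors_page;

/* Assumed single definition outside devicemaps_init(), so that other code
 * (for example the TLS handling for cores without a TLS register) can reach
 * the vectors page through its kernel alias after early_alloc() above. */
void *vectors_page;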