author    Paul Mackerras <paulus@samba.org>  2008-01-23 16:35:13 -0500
committer Paul Mackerras <paulus@samba.org>  2008-01-23 18:06:01 -0500
commit    fa28237cfcc5827553044cbd6ee52e33692b0faa
tree      2e34678548e5323eef7392a94a7415e1754cbd1e /include/asm-powerpc
parent    0a0a5af30b9831e4f049610b5a2d9d5108ff027a
[POWERPC] Provide a way to protect 4k subpages when using 64k pages
Using 64k pages on 64-bit PowerPC systems makes life difficult for emulators that are trying to emulate an ISA, such as x86, which use a smaller page size, since the emulator can no longer use the MMU and the normal system calls for controlling page protections.  Of course, the emulator can emulate the MMU by checking and possibly remapping the address for each memory access in software, but that is pretty slow.

This provides a facility for such programs to control the access permissions on individual 4k sub-pages of 64k pages.  The idea is that the emulator supplies an array of protection masks to apply to a specified range of virtual addresses.  These masks are applied at the level where hardware PTEs are inserted into the hardware page table based on the Linux PTEs, so the Linux PTEs are not affected.  Note that this new mechanism does not allow any access that would otherwise be prohibited; it can only prohibit accesses that would otherwise be allowed.  This new facility is only available on 64-bit PowerPC and only when the kernel is configured for 64k pages.

The masks are supplied using a new subpage_prot system call, which takes a starting virtual address and length, and a pointer to an array of protection masks in memory.  The array has a 32-bit word per 64k page to be protected; each 32-bit word consists of 16 2-bit fields, for which 0 allows any access (that is otherwise allowed), 1 prevents write accesses, and 2 or 3 prevent any access.

Implicit in this is that the regions of the address space that are protected are switched to use 4k hardware pages rather than 64k hardware pages (on machines with hardware 64k page support).  In fact the whole process is switched to use 4k hardware pages when the subpage_prot system call is used, but this could be improved in future to switch only the affected segments.

The subpage protection bits are stored in a 3-level tree akin to the page table tree.  The top level of this tree is stored in a structure that is appended to the top level of the page table tree, i.e., the pgd array.  Since it will often only be 32-bit addresses (below 4GB) that are protected, the pointers to the first four bottom level pages are also stored in this structure (each bottom level page contains the protection bits for 1GB of address space), so the protection bits for addresses below 4GB can be accessed with one fewer load than those for higher addresses.

Signed-off-by: Paul Mackerras <paulus@samba.org>
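As a rough illustration of the interface described above, here is a minimal userspace sketch.  It assumes the glibc syscall(2) wrapper, takes the number 310 from the unistd.h hunk below, and assumes that field 0 of each 32-bit word sits in the top two bits (the field-to-subpage ordering is not spelled out by the headers in this patch):

	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_subpage_prot
	#define __NR_subpage_prot 310	/* from the unistd.h hunk below */
	#endif

	/*
	 * Write-protect the first 4k subpage of one 64k page and leave
	 * the other fifteen subpages unrestricted.  One 32-bit word
	 * covers one 64k page; each 2-bit field is 0 (no extra
	 * restriction), 1 (no writes), or 2/3 (no access).
	 */
	static int protect_first_subpage(unsigned long addr_64k_aligned)
	{
		/* field 0 = 1: assumed to live in bits 31:30 */
		uint32_t map = 1u << 30;

		return syscall(__NR_subpage_prot, addr_64k_aligned,
			       0x10000UL /* one 64k page */, &map);
	}

Because the masks can only restrict, passing a word of all zeroes simply restores whatever access the Linux PTEs already allow.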
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/mmu-hash64.h  |  3
-rw-r--r--  include/asm-powerpc/pgalloc-64.h  |  5
-rw-r--r--  include/asm-powerpc/pgtable-64k.h | 39
-rw-r--r--  include/asm-powerpc/systbl.h      |  1
-rw-r--r--  include/asm-powerpc/unistd.h      |  3
5 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 2a1b4040e20d..2864fa3989ea 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -265,7 +265,7 @@ static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
 
 extern int __hash_page_4K(unsigned long ea, unsigned long access,
 			  unsigned long vsid, pte_t *ptep, unsigned long trap,
-			  unsigned int local, int ssize);
+			  unsigned int local, int ssize, int subpage_prot);
 extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned long vsid, pte_t *ptep, unsigned long trap,
 			   unsigned int local, int ssize);
@@ -279,6 +279,7 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			unsigned long pstart, unsigned long mode,
 			int psize, int ssize);
 extern void set_huge_psize(int psize);
+extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
 extern void htab_initialize(void);
 extern void htab_initialize_secondary(void);
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 94d0294341d6..43214c8085b7 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -12,6 +12,10 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+#ifndef CONFIG_PPC_SUBPAGE_PROT
+static inline void subpage_prot_free(pgd_t *pgd) {}
+#endif
+
 extern struct kmem_cache *pgtable_cache[];
 
 #define PGD_CACHE_NUM		0
@@ -27,6 +31,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(pgd_t *pgd)
 {
+	subpage_prot_free(pgd);
 	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
 }
 
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index bd54b772fbc6..1cbd6b377eea 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -13,12 +13,49 @@
 #define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
 #define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
-#endif	/* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these.  Basically we have a 3-level tree, with the top level being
+ * the protptrs array.  To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k).  For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+	unsigned long maxaddr;	/* only addresses < this are protected */
+	unsigned int **protptrs[2];
+	unsigned int *low_prot[4];
+};
+
+#undef PGD_TABLE_SIZE
+#define PGD_TABLE_SIZE		((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
+				 sizeof(struct subpage_prot_table))
+
+#define SBP_L1_BITS		(PAGE_SHIFT - 2)
+#define SBP_L2_BITS		(PAGE_SHIFT - 3)
+#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
+#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(pgd_t *pgd);
+
+static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
+{
+	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
+}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#endif	/* __ASSEMBLY__ */
+
 /* With 4k base page size, hugepage PTEs go at the PMD level */
 #define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
 
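To make the SBP_* geometry concrete, here is an illustrative walk of the 3-level tree for an effective address ea.  This is a sketch only, assuming the definitions from the hunk above; the real lookup is done in the hash-fault path rather than in this header, and treating unpopulated nodes as zero is an assumption consistent with the rule that the mechanism can only restrict accesses, never grant them:

	/* Illustrative: fetch the 32-bit protection word covering ea's
	 * 64k page.  Returns 0 (no extra restriction) when no node on
	 * the path is populated. */
	static inline unsigned int subpage_prot_word(pgd_t *pgd,
						     unsigned long ea)
	{
		struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
		unsigned int **sbpm, *sbpp;

		if (ea >= spt->maxaddr)
			return 0;
		if (ea < 0x100000000UL) {
			/* below 4GB: one fewer load via low_prot */
			sbpm = spt->low_prot;
		} else {
			/* ea >> SBP_L3_SHIFT indexes 8TB chunks; it is
			 * assumed bounded by maxaddr and the user
			 * address space limit */
			sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
			if (sbpm == NULL)
				return 0;
		}
		sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (sbpp == NULL)
			return 0;
		return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
	}

With 64k pages (PAGE_SHIFT = 16), SBP_L2_SHIFT is 30, so each bottom-level page covers 1GB and the first four such pages, reachable through low_prot, cover exactly the sub-4GB range the commit message singles out.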
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index 11d5383b2f09..0c8b0d679139 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -313,3 +313,4 @@ COMPAT_SYS_SPU(timerfd)
 SYSCALL_SPU(eventfd)
 COMPAT_SYS_SPU(sync_file_range2)
 COMPAT_SYS(fallocate)
+SYSCALL(subpage_prot)
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 97d82b6a9406..fedc4b8e49e2 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -332,10 +332,11 @@
 #define __NR_eventfd		307
 #define __NR_sync_file_range2	308
 #define __NR_fallocate		309
+#define __NR_subpage_prot	310
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		310
+#define __NR_syscalls		311
 
 #define __NR__exit __NR_exit
 #define NR_syscalls __NR_syscalls