author	Paul Mackerras <paulus@samba.org>	2008-01-23 16:35:13 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-01-23 18:06:01 -0500
commit	fa28237cfcc5827553044cbd6ee52e33692b0faa (patch)
tree	2e34678548e5323eef7392a94a7415e1754cbd1e /arch/powerpc/mm/subpage-prot.c
parent	0a0a5af30b9831e4f049610b5a2d9d5108ff027a (diff)
[POWERPC] Provide a way to protect 4k subpages when using 64k pages
Using 64k pages on 64-bit PowerPC systems makes life difficult for emulators that are trying to emulate an ISA, such as x86, which uses a smaller page size, since the emulator can no longer use the MMU and the normal system calls for controlling page protections. Of course, the emulator can emulate the MMU by checking and possibly remapping the address for each memory access in software, but that is pretty slow.

This provides a facility for such programs to control the access permissions on individual 4k sub-pages of 64k pages. The idea is that the emulator supplies an array of protection masks to apply to a specified range of virtual addresses. These masks are applied at the level where hardware PTEs are inserted into the hardware page table based on the Linux PTEs, so the Linux PTEs are not affected. Note that this new mechanism does not allow any access that would otherwise be prohibited; it can only prohibit accesses that would otherwise be allowed. This new facility is only available on 64-bit PowerPC and only when the kernel is configured for 64k pages.

The masks are supplied using a new subpage_prot system call, which takes a starting virtual address and length, and a pointer to an array of protection masks in memory. The array has a 32-bit word per 64k page to be protected; each 32-bit word consists of 16 2-bit fields, for which 0 allows any access (that is otherwise allowed), 1 prevents write accesses, and 2 or 3 prevent any access.

Implicit in this is that the regions of the address space that are protected are switched to use 4k hardware pages rather than 64k hardware pages (on machines with hardware 64k page support). In fact, the whole process is switched to use 4k hardware pages when the subpage_prot system call is used, but this could be improved in future to switch only the affected segments.

The subpage protection bits are stored in a 3-level tree akin to the page table tree. The top level of this tree is stored in a structure that is appended to the top level of the page table tree, i.e. the pgd array. Since it will often only be 32-bit addresses (below 4GB) that are protected, the pointers to the first four bottom-level pages are also stored in this structure (each bottom-level page contains the protection bits for 1GB of address space), so the protection bits for addresses below 4GB can be accessed with one fewer load than those for higher addresses.

Signed-off-by: Paul Mackerras <paulus@samba.org>
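For illustration, here is a user-space sketch of how an emulator might exercise the new call. It is not part of this patch: the __NR_subpage_prot value of 310 and the direct use of syscall(2) are assumptions, and the example deliberately gives every subpage of a 64k page the same 2-bit value, so the ordering of the fields within a mask word does not matter here.

/*
 * Hypothetical user-space sketch, not part of this patch.
 * Assumes a powerpc64 kernel built with 64k pages and that
 * __NR_subpage_prot is 310; verify against the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_subpage_prot
#define __NR_subpage_prot 310		/* assumed syscall number */
#endif

#define PAGE_64K	0x10000UL

int main(void)
{
	/* Two 64k pages; mmap on a 64k-page kernel returns 64k-aligned memory. */
	size_t len = 2 * PAGE_64K;
	char *region = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint32_t map[2];

	if (region == MAP_FAILED)
		return 1;

	/*
	 * One 32-bit word per 64k page, 16 2-bit fields per word.
	 * 0x55555555 puts 1 (prevent writes) in every field, so the whole
	 * first page becomes read-only; 0 leaves the second page untouched.
	 */
	map[0] = 0x55555555;
	map[1] = 0;

	if (syscall(__NR_subpage_prot, (unsigned long)region, len, map) != 0) {
		perror("subpage_prot");
		return 1;
	}

	region[PAGE_64K] = 1;	/* second page: still writable */
	/* region[0] = 1; would now fault, despite the RW Linux PTE */

	/* A NULL map clears the subpage protections for the range again. */
	syscall(__NR_subpage_prot, (unsigned long)region, len, NULL);
	return 0;
}

Passing a NULL map pointer clears the protections again, which corresponds to the subpage_prot_clear() path in the new file below.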
Diffstat (limited to 'arch/powerpc/mm/subpage-prot.c')
-rw-r--r--  arch/powerpc/mm/subpage-prot.c  213
1 file changed, 213 insertions, 0 deletions
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
new file mode 100644
index 000000000000..4cafc0c33d0a
--- /dev/null
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -0,0 +1,213 @@
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(pgd_t *pgd)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
 out2:
	return err;
}