diff options
author | Catalin Marinas <catalin.marinas@arm.com> | 2012-03-05 06:49:27 -0500 |
---|---|---|
committer | Catalin Marinas <catalin.marinas@arm.com> | 2012-09-17 08:41:57 -0400 |
commit | 1d18c47c735e8adfe531fc41fae31e98f86b68fe (patch) | |
tree | b63f253304e2d955e1a17d12e4f0dc94312f58ea /arch/arm64 | |
parent | c1cc1552616d0f354d040823151e61634e7ad01f (diff) |
arm64: MMU fault handling and page table management
This patch adds support for the handling of the MMU faults (exception
entry code introduced by a previous patch) and page table management.
The user translation table is pointed to by TTBR0 and the kernel one
(swapper_pg_dir) by TTBR1. There is no translation information shared or
address space overlapping between user and kernel page tables.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/page.h | 67 | ||||
-rw-r--r-- | arch/arm64/include/asm/pgalloc.h | 113 | ||||
-rw-r--r-- | arch/arm64/mm/copypage.c | 34 | ||||
-rw-r--r-- | arch/arm64/mm/extable.c | 17 | ||||
-rw-r--r-- | arch/arm64/mm/fault.c | 534 | ||||
-rw-r--r-- | arch/arm64/mm/mm.h | 2 | ||||
-rw-r--r-- | arch/arm64/mm/mmap.c | 144 | ||||
-rw-r--r-- | arch/arm64/mm/pgd.c | 54 |
8 files changed, 965 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h new file mode 100644 index 000000000000..46bf66628b6a --- /dev/null +++ b/arch/arm64/include/asm/page.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/include/asm/page.h | ||
3 | * | ||
4 | * Copyright (C) 1995-2003 Russell King | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #ifndef __ASM_PAGE_H | ||
20 | #define __ASM_PAGE_H | ||
21 | |||
22 | /* PAGE_SHIFT determines the page size */ | ||
23 | #ifdef CONFIG_ARM64_64K_PAGES | ||
24 | #define PAGE_SHIFT 16 | ||
25 | #else | ||
26 | #define PAGE_SHIFT 12 | ||
27 | #endif | ||
28 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
29 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
30 | |||
31 | /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ | ||
32 | #define __HAVE_ARCH_GATE_AREA 1 | ||
33 | |||
34 | #ifndef __ASSEMBLY__ | ||
35 | |||
36 | #ifdef CONFIG_ARM64_64K_PAGES | ||
37 | #include <asm/pgtable-2level-types.h> | ||
38 | #else | ||
39 | #include <asm/pgtable-3level-types.h> | ||
40 | #endif | ||
41 | |||
42 | extern void __cpu_clear_user_page(void *p, unsigned long user); | ||
43 | extern void __cpu_copy_user_page(void *to, const void *from, | ||
44 | unsigned long user); | ||
45 | extern void copy_page(void *to, const void *from); | ||
46 | extern void clear_page(void *to); | ||
47 | |||
48 | #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) | ||
49 | #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) | ||
50 | |||
51 | typedef struct page *pgtable_t; | ||
52 | |||
53 | #ifdef CONFIG_HAVE_ARCH_PFN_VALID | ||
54 | extern int pfn_valid(unsigned long); | ||
55 | #endif | ||
56 | |||
57 | #include <asm/memory.h> | ||
58 | |||
59 | #endif /* !__ASSEMBLY__ */ | ||
60 | |||
61 | #define VM_DATA_DEFAULT_FLAGS \ | ||
62 | (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ | ||
63 | VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
64 | |||
65 | #include <asm-generic/getorder.h> | ||
66 | |||
67 | #endif | ||
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h new file mode 100644 index 000000000000..f214069ec5d5 --- /dev/null +++ b/arch/arm64/include/asm/pgalloc.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/include/asm/pgalloc.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2001 Russell King | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #ifndef __ASM_PGALLOC_H | ||
20 | #define __ASM_PGALLOC_H | ||
21 | |||
22 | #include <asm/pgtable-hwdef.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/tlbflush.h> | ||
26 | |||
27 | #define check_pgt_cache() do { } while (0) | ||
28 | |||
29 | #ifndef CONFIG_ARM64_64K_PAGES | ||
30 | |||
31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
32 | { | ||
33 | return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | ||
34 | } | ||
35 | |||
36 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
37 | { | ||
38 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | ||
39 | free_page((unsigned long)pmd); | ||
40 | } | ||
41 | |||
42 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | ||
43 | { | ||
44 | set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); | ||
45 | } | ||
46 | |||
47 | #endif /* CONFIG_ARM64_64K_PAGES */ | ||
48 | |||
49 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
50 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | ||
51 | |||
52 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | ||
53 | |||
54 | static inline pte_t * | ||
55 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | ||
56 | { | ||
57 | return (pte_t *)__get_free_page(PGALLOC_GFP); | ||
58 | } | ||
59 | |||
60 | static inline pgtable_t | ||
61 | pte_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
62 | { | ||
63 | struct page *pte; | ||
64 | |||
65 | pte = alloc_pages(PGALLOC_GFP, 0); | ||
66 | if (pte) | ||
67 | pgtable_page_ctor(pte); | ||
68 | |||
69 | return pte; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Free a PTE table. | ||
74 | */ | ||
75 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
76 | { | ||
77 | if (pte) | ||
78 | free_page((unsigned long)pte); | ||
79 | } | ||
80 | |||
81 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | ||
82 | { | ||
83 | pgtable_page_dtor(pte); | ||
84 | __free_page(pte); | ||
85 | } | ||
86 | |||
87 | static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, | ||
88 | pmdval_t prot) | ||
89 | { | ||
90 | set_pmd(pmdp, __pmd(pte | prot)); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Populate the pmdp entry with a pointer to the pte. This pmd is part | ||
95 | * of the mm address space. | ||
96 | */ | ||
97 | static inline void | ||
98 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) | ||
99 | { | ||
100 | /* | ||
101 | * The pmd must be loaded with the physical address of the PTE table | ||
102 | */ | ||
103 | __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE); | ||
104 | } | ||
105 | |||
106 | static inline void | ||
107 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) | ||
108 | { | ||
109 | __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE); | ||
110 | } | ||
111 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
112 | |||
113 | #endif | ||
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c new file mode 100644 index 000000000000..9aecbace4128 --- /dev/null +++ b/arch/arm64/mm/copypage.c | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/mm/copypage.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved. | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/mm.h> | ||
21 | |||
22 | #include <asm/page.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) | ||
26 | { | ||
27 | copy_page(kto, kfrom); | ||
28 | __flush_dcache_area(kto, PAGE_SIZE); | ||
29 | } | ||
30 | |||
31 | void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) | ||
32 | { | ||
33 | clear_page(kaddr); | ||
34 | } | ||
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c new file mode 100644 index 000000000000..79444279ba8c --- /dev/null +++ b/arch/arm64/mm/extable.c | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/mm/extable.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/module.h> | ||
6 | #include <linux/uaccess.h> | ||
7 | |||
8 | int fixup_exception(struct pt_regs *regs) | ||
9 | { | ||
10 | const struct exception_table_entry *fixup; | ||
11 | |||
12 | fixup = search_exception_tables(instruction_pointer(regs)); | ||
13 | if (fixup) | ||
14 | regs->pc = fixup->fixup; | ||
15 | |||
16 | return fixup != NULL; | ||
17 | } | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c new file mode 100644 index 000000000000..1909a69983ca --- /dev/null +++ b/arch/arm64/mm/fault.c | |||
@@ -0,0 +1,534 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Copyright (C) 1995-2004 Russell King | ||
6 | * Copyright (C) 2012 ARM Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/signal.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/kprobes.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/page-flags.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/perf_event.h> | ||
32 | |||
33 | #include <asm/exception.h> | ||
34 | #include <asm/debug-monitors.h> | ||
35 | #include <asm/system_misc.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/tlbflush.h> | ||
38 | |||
39 | /* | ||
40 | * Dump out the page tables associated with 'addr' in mm 'mm'. | ||
41 | */ | ||
42 | void show_pte(struct mm_struct *mm, unsigned long addr) | ||
43 | { | ||
44 | pgd_t *pgd; | ||
45 | |||
46 | if (!mm) | ||
47 | mm = &init_mm; | ||
48 | |||
49 | pr_alert("pgd = %p\n", mm->pgd); | ||
50 | pgd = pgd_offset(mm, addr); | ||
51 | pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd)); | ||
52 | |||
53 | do { | ||
54 | pud_t *pud; | ||
55 | pmd_t *pmd; | ||
56 | pte_t *pte; | ||
57 | |||
58 | if (pgd_none_or_clear_bad(pgd)) | ||
59 | break; | ||
60 | |||
61 | pud = pud_offset(pgd, addr); | ||
62 | if (pud_none_or_clear_bad(pud)) | ||
63 | break; | ||
64 | |||
65 | pmd = pmd_offset(pud, addr); | ||
66 | printk(", *pmd=%016llx", pmd_val(*pmd)); | ||
67 | if (pmd_none_or_clear_bad(pmd)) | ||
68 | break; | ||
69 | |||
70 | pte = pte_offset_map(pmd, addr); | ||
71 | printk(", *pte=%016llx", pte_val(*pte)); | ||
72 | pte_unmap(pte); | ||
73 | } while(0); | ||
74 | |||
75 | printk("\n"); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * The kernel tried to access some page that wasn't present. | ||
80 | */ | ||
81 | static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, | ||
82 | unsigned int esr, struct pt_regs *regs) | ||
83 | { | ||
84 | /* | ||
85 | * Are we prepared to handle this kernel fault? | ||
86 | */ | ||
87 | if (fixup_exception(regs)) | ||
88 | return; | ||
89 | |||
90 | /* | ||
91 | * No handler, we'll have to terminate things with extreme prejudice. | ||
92 | */ | ||
93 | bust_spinlocks(1); | ||
94 | pr_alert("Unable to handle kernel %s at virtual address %08lx\n", | ||
95 | (addr < PAGE_SIZE) ? "NULL pointer dereference" : | ||
96 | "paging request", addr); | ||
97 | |||
98 | show_pte(mm, addr); | ||
99 | die("Oops", regs, esr); | ||
100 | bust_spinlocks(0); | ||
101 | do_exit(SIGKILL); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Something tried to access memory that isn't in our memory map. User mode | ||
106 | * accesses just cause a SIGSEGV | ||
107 | */ | ||
108 | static void __do_user_fault(struct task_struct *tsk, unsigned long addr, | ||
109 | unsigned int esr, unsigned int sig, int code, | ||
110 | struct pt_regs *regs) | ||
111 | { | ||
112 | struct siginfo si; | ||
113 | |||
114 | if (show_unhandled_signals) { | ||
115 | pr_info("%s[%d]: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", | ||
116 | tsk->comm, task_pid_nr(tsk), sig, addr, esr); | ||
117 | show_pte(tsk->mm, addr); | ||
118 | show_regs(regs); | ||
119 | } | ||
120 | |||
121 | tsk->thread.fault_address = addr; | ||
122 | si.si_signo = sig; | ||
123 | si.si_errno = 0; | ||
124 | si.si_code = code; | ||
125 | si.si_addr = (void __user *)addr; | ||
126 | force_sig_info(sig, &si, tsk); | ||
127 | } | ||
128 | |||
/*
 * Dispatch a fault on an invalid area: SIGSEGV for user mode,
 * kernel-fault handling (fixup or oops) otherwise.
 */
void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, esr, regs);
}
143 | |||
144 | #define VM_FAULT_BADMAP 0x010000 | ||
145 | #define VM_FAULT_BADACCESS 0x020000 | ||
146 | |||
147 | #define ESR_WRITE (1 << 6) | ||
148 | #define ESR_LNX_EXEC (1 << 24) | ||
149 | |||
150 | /* | ||
151 | * Check that the permissions on the VMA allow for the fault which occurred. | ||
152 | * If we encountered a write fault, we must have write permission, otherwise | ||
153 | * we allow any permission. | ||
154 | */ | ||
155 | static inline bool access_error(unsigned int esr, struct vm_area_struct *vma) | ||
156 | { | ||
157 | unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; | ||
158 | |||
159 | if (esr & ESR_WRITE) | ||
160 | mask = VM_WRITE; | ||
161 | if (esr & ESR_LNX_EXEC) | ||
162 | mask = VM_EXEC; | ||
163 | |||
164 | return vma->vm_flags & mask ? false : true; | ||
165 | } | ||
166 | |||
/*
 * Look up the VMA covering addr and, if permissions allow, hand the
 * fault to the core MM.  Returns a VM_FAULT_* code, possibly augmented
 * with VM_FAULT_BADMAP (no VMA) or VM_FAULT_BADACCESS (bad permission).
 * Caller must hold mm->mmap_sem for read.
 */
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int esr, unsigned int flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	/* addr below vma->vm_start may still be a stack expansion. */
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	if (access_error(esr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
199 | |||
/*
 * Main page fault handler: acquires mmap_sem (with deadlock detection
 * for kernel-mode faults), calls __do_page_fault(), accounts the fault
 * for perf, retries once if the core MM asks for it, and finally
 * translates any failure into a signal or a kernel oops.
 */
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	int write = esr & ESR_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
		(write ? FAULT_FLAG_WRITE : 0);

	tsk = current;
	mm = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, esr, flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}
333 | |||
334 | /* | ||
335 | * First Level Translation Fault Handler | ||
336 | * | ||
337 | * We enter here because the first level page table doesn't contain a valid | ||
338 | * entry for the address. | ||
339 | * | ||
340 | * If the address is in kernel space (>= TASK_SIZE), then we are probably | ||
341 | * faulting in the vmalloc() area. | ||
342 | * | ||
343 | * If the init_task's first level page tables contains the relevant entry, we | ||
344 | * copy the it to this task. If not, we send the process a signal, fixup the | ||
345 | * exception, or oops the kernel. | ||
346 | * | ||
347 | * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt | ||
348 | * or a critical region, and should only copy the information from the master | ||
349 | * page table, nothing more. | ||
350 | */ | ||
351 | static int __kprobes do_translation_fault(unsigned long addr, | ||
352 | unsigned int esr, | ||
353 | struct pt_regs *regs) | ||
354 | { | ||
355 | if (addr < TASK_SIZE) | ||
356 | return do_page_fault(addr, esr, regs); | ||
357 | |||
358 | do_bad_area(addr, esr, regs); | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Some section permission faults need to be handled gracefully. They can | ||
364 | * happen due to a __{get,put}_user during an oops. | ||
365 | */ | ||
366 | static int do_sect_fault(unsigned long addr, unsigned int esr, | ||
367 | struct pt_regs *regs) | ||
368 | { | ||
369 | do_bad_area(addr, esr, regs); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /* | ||
374 | * This abort handler always returns "fault". | ||
375 | */ | ||
376 | static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) | ||
377 | { | ||
378 | return 1; | ||
379 | } | ||
380 | |||
381 | static struct fault_info { | ||
382 | int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); | ||
383 | int sig; | ||
384 | int code; | ||
385 | const char *name; | ||
386 | } fault_info[] = { | ||
387 | { do_bad, SIGBUS, 0, "ttbr address size fault" }, | ||
388 | { do_bad, SIGBUS, 0, "level 1 address size fault" }, | ||
389 | { do_bad, SIGBUS, 0, "level 2 address size fault" }, | ||
390 | { do_bad, SIGBUS, 0, "level 3 address size fault" }, | ||
391 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" }, | ||
392 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | ||
393 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | ||
394 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | ||
395 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | ||
396 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | ||
397 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | ||
398 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | ||
399 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | ||
400 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | ||
401 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | ||
402 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | ||
403 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | ||
404 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | ||
405 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
406 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
407 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
408 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
409 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
410 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
411 | { do_bad, SIGBUS, 0, "synchronous parity error" }, | ||
412 | { do_bad, SIGBUS, 0, "asynchronous parity error" }, | ||
413 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
414 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
415 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
416 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
417 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
418 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
419 | { do_bad, SIGBUS, 0, "unknown 32" }, | ||
420 | { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, | ||
421 | { do_bad, SIGBUS, 0, "debug event" }, | ||
422 | { do_bad, SIGBUS, 0, "unknown 35" }, | ||
423 | { do_bad, SIGBUS, 0, "unknown 36" }, | ||
424 | { do_bad, SIGBUS, 0, "unknown 37" }, | ||
425 | { do_bad, SIGBUS, 0, "unknown 38" }, | ||
426 | { do_bad, SIGBUS, 0, "unknown 39" }, | ||
427 | { do_bad, SIGBUS, 0, "unknown 40" }, | ||
428 | { do_bad, SIGBUS, 0, "unknown 41" }, | ||
429 | { do_bad, SIGBUS, 0, "unknown 42" }, | ||
430 | { do_bad, SIGBUS, 0, "unknown 43" }, | ||
431 | { do_bad, SIGBUS, 0, "unknown 44" }, | ||
432 | { do_bad, SIGBUS, 0, "unknown 45" }, | ||
433 | { do_bad, SIGBUS, 0, "unknown 46" }, | ||
434 | { do_bad, SIGBUS, 0, "unknown 47" }, | ||
435 | { do_bad, SIGBUS, 0, "unknown 48" }, | ||
436 | { do_bad, SIGBUS, 0, "unknown 49" }, | ||
437 | { do_bad, SIGBUS, 0, "unknown 50" }, | ||
438 | { do_bad, SIGBUS, 0, "unknown 51" }, | ||
439 | { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, | ||
440 | { do_bad, SIGBUS, 0, "unknown 53" }, | ||
441 | { do_bad, SIGBUS, 0, "unknown 54" }, | ||
442 | { do_bad, SIGBUS, 0, "unknown 55" }, | ||
443 | { do_bad, SIGBUS, 0, "unknown 56" }, | ||
444 | { do_bad, SIGBUS, 0, "unknown 57" }, | ||
445 | { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, | ||
446 | { do_bad, SIGBUS, 0, "unknown 59" }, | ||
447 | { do_bad, SIGBUS, 0, "unknown 60" }, | ||
448 | { do_bad, SIGBUS, 0, "unknown 61" }, | ||
449 | { do_bad, SIGBUS, 0, "unknown 62" }, | ||
450 | { do_bad, SIGBUS, 0, "unknown 63" }, | ||
451 | }; | ||
452 | |||
453 | /* | ||
454 | * Dispatch a data abort to the relevant handler. | ||
455 | */ | ||
456 | asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, | ||
457 | struct pt_regs *regs) | ||
458 | { | ||
459 | const struct fault_info *inf = fault_info + (esr & 63); | ||
460 | struct siginfo info; | ||
461 | |||
462 | if (!inf->fn(addr, esr, regs)) | ||
463 | return; | ||
464 | |||
465 | pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n", | ||
466 | inf->name, esr, addr); | ||
467 | |||
468 | info.si_signo = inf->sig; | ||
469 | info.si_errno = 0; | ||
470 | info.si_code = inf->code; | ||
471 | info.si_addr = (void __user *)addr; | ||
472 | arm64_notify_die("", regs, &info, esr); | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * Handle stack alignment exceptions. | ||
477 | */ | ||
478 | asmlinkage void __exception do_sp_pc_abort(unsigned long addr, | ||
479 | unsigned int esr, | ||
480 | struct pt_regs *regs) | ||
481 | { | ||
482 | struct siginfo info; | ||
483 | |||
484 | info.si_signo = SIGBUS; | ||
485 | info.si_errno = 0; | ||
486 | info.si_code = BUS_ADRALN; | ||
487 | info.si_addr = (void __user *)addr; | ||
488 | arm64_notify_die("", regs, &info, esr); | ||
489 | } | ||
490 | |||
/*
 * Debug exception dispatch table, indexed by DBG_ESR_EVT(esr) in
 * do_debug_exception().  Entries are replaced at boot via
 * hook_debug_fault_code() by the respective debug subsystems.
 */
static struct fault_info debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
};
501 | |||
502 | void __init hook_debug_fault_code(int nr, | ||
503 | int (*fn)(unsigned long, unsigned int, struct pt_regs *), | ||
504 | int sig, int code, const char *name) | ||
505 | { | ||
506 | BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); | ||
507 | |||
508 | debug_fault_info[nr].fn = fn; | ||
509 | debug_fault_info[nr].sig = sig; | ||
510 | debug_fault_info[nr].code = code; | ||
511 | debug_fault_info[nr].name = name; | ||
512 | } | ||
513 | |||
/*
 * Dispatch a debug exception via debug_fault_info.  Returns 1 when the
 * hooked handler dealt with it, 0 after reporting an unhandled event.
 */
asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return 1;

	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);

	return 0;
}
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h new file mode 100644 index 000000000000..d8d6e7851c14 --- /dev/null +++ b/arch/arm64/mm/mm.h | |||
@@ -0,0 +1,2 @@ | |||
/* Private declarations shared between the arch/arm64/mm/ files. */
extern void __flush_dcache_page(struct page *page);
extern void __init bootmem_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c new file mode 100644 index 000000000000..7c7be7855638 --- /dev/null +++ b/arch/arm64/mm/mmap.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/mm/mmap.c | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/elf.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/mman.h> | ||
23 | #include <linux/export.h> | ||
24 | #include <linux/shm.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/personality.h> | ||
28 | #include <linux/random.h> | ||
29 | |||
30 | #include <asm/cputype.h> | ||
31 | |||
/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation: the minimum gap covers the largest possible
 * stack randomisation offset plus 128MB of headroom.
 */
#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
/*
 * Cap the gap at 5/6 of STACK_TOP so the top-down mmap base (STACK_TOP - gap)
 * never drops below the bottom sixth of the address space.
 */
#define MAX_GAP (STACK_TOP/6*5)
38 | |||
39 | static int mmap_is_legacy(void) | ||
40 | { | ||
41 | if (current->personality & ADDR_COMPAT_LAYOUT) | ||
42 | return 1; | ||
43 | |||
44 | if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) | ||
45 | return 1; | ||
46 | |||
47 | return sysctl_legacy_va_layout; | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Since get_random_int() returns the same value within a 1 jiffy window, we | ||
52 | * will almost always get the same randomisation for the stack and mmap | ||
53 | * region. This will mean the relative distance between stack and mmap will be | ||
54 | * the same. | ||
55 | * | ||
56 | * To avoid this we can shift the randomness by 1 bit. | ||
57 | */ | ||
58 | static unsigned long mmap_rnd(void) | ||
59 | { | ||
60 | unsigned long rnd = 0; | ||
61 | |||
62 | if (current->flags & PF_RANDOMIZE) | ||
63 | rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); | ||
64 | |||
65 | return rnd << (PAGE_SHIFT + 1); | ||
66 | } | ||
67 | |||
68 | static unsigned long mmap_base(void) | ||
69 | { | ||
70 | unsigned long gap = rlimit(RLIMIT_STACK); | ||
71 | |||
72 | if (gap < MIN_GAP) | ||
73 | gap = MIN_GAP; | ||
74 | else if (gap > MAX_GAP) | ||
75 | gap = MAX_GAP; | ||
76 | |||
77 | return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd()); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * This function, called very early during the creation of a new process VM | ||
82 | * image, sets up which VM layout function to use: | ||
83 | */ | ||
84 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
85 | { | ||
86 | /* | ||
87 | * Fall back to the standard layout if the personality bit is set, or | ||
88 | * if the expected stack growth is unlimited: | ||
89 | */ | ||
90 | if (mmap_is_legacy()) { | ||
91 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
92 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
93 | mm->unmap_area = arch_unmap_area; | ||
94 | } else { | ||
95 | mm->mmap_base = mmap_base(); | ||
96 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
97 | mm->unmap_area = arch_unmap_area_topdown; | ||
98 | } | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); | ||
101 | |||
102 | |||
103 | /* | ||
104 | * You really shouldn't be using read() or write() on /dev/mem. This might go | ||
105 | * away in the future. | ||
106 | */ | ||
107 | int valid_phys_addr_range(unsigned long addr, size_t size) | ||
108 | { | ||
109 | if (addr < PHYS_OFFSET) | ||
110 | return 0; | ||
111 | if (addr + size > __pa(high_memory - 1) + 1) | ||
112 | return 0; | ||
113 | |||
114 | return 1; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Do not allow /dev/mem mappings beyond the supported physical range. | ||
119 | */ | ||
120 | int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) | ||
121 | { | ||
122 | return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); | ||
123 | } | ||
124 | |||
125 | #ifdef CONFIG_STRICT_DEVMEM | ||
126 | |||
127 | #include <linux/ioport.h> | ||
128 | |||
129 | /* | ||
130 | * devmem_is_allowed() checks to see if /dev/mem access to a certain address | ||
131 | * is valid. The argument is a physical page number. We mimic x86 here by | ||
132 | * disallowing access to system RAM as well as device-exclusive MMIO regions. | ||
133 | * This effectively disable read()/write() on /dev/mem. | ||
134 | */ | ||
135 | int devmem_is_allowed(unsigned long pfn) | ||
136 | { | ||
137 | if (iomem_is_exclusive(pfn << PAGE_SHIFT)) | ||
138 | return 0; | ||
139 | if (!page_is_ram(pfn)) | ||
140 | return 1; | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | #endif | ||
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c new file mode 100644 index 000000000000..7083cdada657 --- /dev/null +++ b/arch/arm64/mm/pgd.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * PGD allocation/freeing | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/mm.h> | ||
21 | #include <linux/gfp.h> | ||
22 | #include <linux/highmem.h> | ||
23 | #include <linux/slab.h> | ||
24 | |||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | #include "mm.h" | ||
30 | |||
31 | #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) | ||
32 | |||
33 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
34 | { | ||
35 | pgd_t *new_pgd; | ||
36 | |||
37 | if (PGD_SIZE == PAGE_SIZE) | ||
38 | new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); | ||
39 | else | ||
40 | new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL); | ||
41 | |||
42 | if (!new_pgd) | ||
43 | return NULL; | ||
44 | |||
45 | return new_pgd; | ||
46 | } | ||
47 | |||
48 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
49 | { | ||
50 | if (PGD_SIZE == PAGE_SIZE) | ||
51 | free_page((unsigned long)pgd); | ||
52 | else | ||
53 | kfree(pgd); | ||
54 | } | ||