author		Catalin Marinas <catalin.marinas@arm.com>	2013-07-01 06:20:58 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2013-07-01 06:20:58 -0400
commit		aa729dccb5e8dfbc78e2e235b8754d6acccee731 (patch)
tree		f6123726a25957481e2528b9b6b0d0cfd992a5fb /arch/arm64/include
parent		ee877b5321c4dfee9dc9f2a12b19ddcd33149f6a (diff)
parent		af07484863e0c20796081e57093886c22dc16705 (diff)
Merge branch 'for-next/hugepages' of git://git.linaro.org/people/stevecapper/linux into upstream-hugepages
* 'for-next/hugepages' of git://git.linaro.org/people/stevecapper/linux:
ARM64: mm: THP support.
ARM64: mm: Raise MAX_ORDER for 64KB pages and THP.
ARM64: mm: HugeTLB support.
ARM64: mm: Move PTE_PROT_NONE bit.
ARM64: mm: Make PAGE_NONE pages read only and no-execute.
ARM64: mm: Restore memblock limit when map_mem finished.
mm: thp: Correct the HPAGE_PMD_ORDER check.
x86: mm: Remove general hugetlb code from x86.
mm: hugetlb: Copy general hugetlb code from x86 to mm.
x86: mm: Remove x86 version of huge_pmd_share.
mm: hugetlb: Copy huge_pmd_share from x86 to mm.
Conflicts:
arch/arm64/Kconfig
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/hugetlb.h	117
-rw-r--r--	arch/arm64/include/asm/pgtable-hwdef.h	13
-rw-r--r--	arch/arm64/include/asm/pgtable.h	96
-rw-r--r--	arch/arm64/include/asm/tlb.h	6
-rw-r--r--	arch/arm64/include/asm/tlbflush.h	2
5 files changed, 217 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
new file mode 100644
index 000000000000..5b7ca8ace95f
--- /dev/null
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -0,0 +1,117 @@
+/*
+ * arch/arm64/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+	ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr, unsigned long len)
+{
+	return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+					 unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+	clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 66367c6c6527..e182a356c979 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -25,17 +25,27 @@
 /*
  * Hardware page table definitions.
  *
+ * Level 1 descriptor (PUD).
+ */
+
+#define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
+
+/*
  * Level 2 descriptor (PMD).
  */
 #define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
 #define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
 #define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)
 
 /*
  * Section
  */
-#define PMD_SECT_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
+#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
@@ -54,6 +64,7 @@
 #define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
 #define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
 #define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
 #define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5588e8ad9762..065e58f01b1e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,8 +25,8 @@
  * Software defined PTE bits definition.
  */
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 1)		/* only when !PTE_VALID */
-#define PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !pte_present() */
+#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 2)		/* only when !PTE_VALID */
+#define PTE_FILE		(_AT(pteval_t, 1) << 3)		/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
 
@@ -66,7 +66,7 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
+#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -82,7 +82,7 @@ extern pgprot_t pgprot_default;
 #define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
+#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -179,12 +179,76 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 /*
  * Huge pte definitions.
  */
-#define pte_huge(pte)		((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
-#define pte_mkhuge(pte)		(__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
+#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+/*
+ * Hugetlb definitions.
+ */
+#define HUGE_MAX_HSTATE		2
+#define HPAGE_SHIFT		PMD_SHIFT
+#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /*
+ * Software PMD bits for THP
+ */
+
+#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 57)
+
+/*
+ * THP definitions.
+ */
+#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+
+#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
+			      PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
+			      PMD_SECT_VALID;
+	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+	return pmd;
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
+
+static inline int has_transparent_hugepage(void)
+{
+	return 1;
+}
+
+/*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
@@ -293,12 +357,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
- *	bits 0-1:	present (must be zero)
- *	bit 2:		PTE_FILE
- *	bits 3-8:	swap type
+ *	bits 0, 2:	present (must both be zero)
+ *	bit 3:		PTE_FILE
+ *	bits 4-8:	swap type
  *	bits 9-63:	swap offset
  */
-#define __SWP_TYPE_SHIFT	3
+#define __SWP_TYPE_SHIFT	4
 #define __SWP_TYPE_BITS		6
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -318,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a file entry:
- *	bits 0-1:	present (must be zero)
- *	bit 2:		PTE_FILE
- *	bits 3-63:	file offset / PAGE_SIZE
+ *	bits 0, 2:	present (must both be zero)
+ *	bit 3:		PTE_FILE
+ *	bits 4-63:	file offset / PAGE_SIZE
  */
 #define pte_file(pte)		(pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 3)
-#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 4)
+#define pgoff_to_pte(x)		__pte(((x) << 4) | PTE_FILE)
 
-#define PTE_FILE_MAX_BITS	61
+#define PTE_FILE_MAX_BITS	60
 
 extern int kern_addr_valid(unsigned long addr);
 
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 654f0968030b..46b3beb4b773 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -187,4 +187,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 
 #define tlb_migrate_finish(mm)	do { } while (0)
 
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+	tlb_add_flush(tlb, addr);
+}
+
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 122d6320f745..8b482035cfc2 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -117,6 +117,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	dsb();
 }
 
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif
 
 #endif