diff options
author | Mike Rapoport <rppt@linux.ibm.com> | 2019-07-11 23:57:49 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-12 14:05:45 -0400 |
commit | 5fba4af4456b5d3f982d4ac1c879d16b36aaa0fb (patch) | |
tree | 0e4431ff013d419e295eec2a53fbcd419100adbf | |
parent | 790c73690c2bbecb3f6f8becbdb11ddc9bcff8cc (diff) |
asm-generic, x86: introduce generic pte_{alloc,free}_one[_kernel]
Most architectures have identical or very similar implementation of
pte_alloc_one_kernel(), pte_alloc_one(), pte_free_kernel() and
pte_free().
Add a generic implementation that can be reused across architectures and
enable its use on x86.
The generic implementation uses
GFP_KERNEL | __GFP_ZERO
for the kernel page tables and
GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
for the user page tables.
The "base" functions for PTE allocation, namely __pte_alloc_one_kernel()
and __pte_alloc_one() are intended for the architectures that require
additional actions after actual memory allocation or must use non-default
GFP flags.
x86 is switched to use generic pte_alloc_one_kernel(), pte_free_kernel() and
pte_free().
x86 still implements pte_alloc_one() to allow run-time control of GFP
flags required for "userpte" command line option.
Link: http://lkml.kernel.org/r/1557296232-15361-2-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/x86/include/asm/pgalloc.h | 19 | ||||
-rw-r--r-- | arch/x86/mm/pgtable.c | 33 | ||||
-rw-r--r-- | include/asm-generic/pgalloc.h | 107 |
3 files changed, 115 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index a281e61ec60c..29aa7859bdee 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
@@ -6,6 +6,9 @@ | |||
6 | #include <linux/mm.h> /* for struct page */ | 6 | #include <linux/mm.h> /* for struct page */ |
7 | #include <linux/pagemap.h> | 7 | #include <linux/pagemap.h> |
8 | 8 | ||
9 | #define __HAVE_ARCH_PTE_ALLOC_ONE | ||
10 | #include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */ | ||
11 | |||
9 | static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } | 12 | static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } |
10 | 13 | ||
11 | #ifdef CONFIG_PARAVIRT_XXL | 14 | #ifdef CONFIG_PARAVIRT_XXL |
@@ -47,24 +50,8 @@ extern gfp_t __userpte_alloc_gfp; | |||
47 | extern pgd_t *pgd_alloc(struct mm_struct *); | 50 | extern pgd_t *pgd_alloc(struct mm_struct *); |
48 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | 51 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
49 | 52 | ||
50 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *); | ||
51 | extern pgtable_t pte_alloc_one(struct mm_struct *); | 53 | extern pgtable_t pte_alloc_one(struct mm_struct *); |
52 | 54 | ||
53 | /* Should really implement gc for free page table pages. This could be | ||
54 | done with a reference count in struct page. */ | ||
55 | |||
56 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
57 | { | ||
58 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | ||
59 | free_page((unsigned long)pte); | ||
60 | } | ||
61 | |||
62 | static inline void pte_free(struct mm_struct *mm, struct page *pte) | ||
63 | { | ||
64 | pgtable_page_dtor(pte); | ||
65 | __free_page(pte); | ||
66 | } | ||
67 | |||
68 | extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte); | 55 | extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte); |
69 | 56 | ||
70 | static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | 57 | static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 1f67b1e15bf6..44816ff6411f 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -13,33 +13,17 @@ phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; | |||
13 | EXPORT_SYMBOL(physical_mask); | 13 | EXPORT_SYMBOL(physical_mask); |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) | ||
17 | |||
18 | #ifdef CONFIG_HIGHPTE | 16 | #ifdef CONFIG_HIGHPTE |
19 | #define PGALLOC_USER_GFP __GFP_HIGHMEM | 17 | #define PGTABLE_HIGHMEM __GFP_HIGHMEM |
20 | #else | 18 | #else |
21 | #define PGALLOC_USER_GFP 0 | 19 | #define PGTABLE_HIGHMEM 0 |
22 | #endif | 20 | #endif |
23 | 21 | ||
24 | gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP; | 22 | gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM; |
25 | |||
26 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm) | ||
27 | { | ||
28 | return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT); | ||
29 | } | ||
30 | 23 | ||
31 | pgtable_t pte_alloc_one(struct mm_struct *mm) | 24 | pgtable_t pte_alloc_one(struct mm_struct *mm) |
32 | { | 25 | { |
33 | struct page *pte; | 26 | return __pte_alloc_one(mm, __userpte_alloc_gfp); |
34 | |||
35 | pte = alloc_pages(__userpte_alloc_gfp, 0); | ||
36 | if (!pte) | ||
37 | return NULL; | ||
38 | if (!pgtable_page_ctor(pte)) { | ||
39 | __free_page(pte); | ||
40 | return NULL; | ||
41 | } | ||
42 | return pte; | ||
43 | } | 27 | } |
44 | 28 | ||
45 | static int __init setup_userpte(char *arg) | 29 | static int __init setup_userpte(char *arg) |
@@ -235,7 +219,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) | |||
235 | { | 219 | { |
236 | int i; | 220 | int i; |
237 | bool failed = false; | 221 | bool failed = false; |
238 | gfp_t gfp = PGALLOC_GFP; | 222 | gfp_t gfp = GFP_PGTABLE_USER; |
239 | 223 | ||
240 | if (mm == &init_mm) | 224 | if (mm == &init_mm) |
241 | gfp &= ~__GFP_ACCOUNT; | 225 | gfp &= ~__GFP_ACCOUNT; |
@@ -399,14 +383,14 @@ static inline pgd_t *_pgd_alloc(void) | |||
399 | * We allocate one page for pgd. | 383 | * We allocate one page for pgd. |
400 | */ | 384 | */ |
401 | if (!SHARED_KERNEL_PMD) | 385 | if (!SHARED_KERNEL_PMD) |
402 | return (pgd_t *)__get_free_pages(PGALLOC_GFP, | 386 | return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, |
403 | PGD_ALLOCATION_ORDER); | 387 | PGD_ALLOCATION_ORDER); |
404 | 388 | ||
405 | /* | 389 | /* |
406 | * Now PAE kernel is not running as a Xen domain. We can allocate | 390 | * Now PAE kernel is not running as a Xen domain. We can allocate |
407 | * a 32-byte slab for pgd to save memory space. | 391 | * a 32-byte slab for pgd to save memory space. |
408 | */ | 392 | */ |
409 | return kmem_cache_alloc(pgd_cache, PGALLOC_GFP); | 393 | return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER); |
410 | } | 394 | } |
411 | 395 | ||
412 | static inline void _pgd_free(pgd_t *pgd) | 396 | static inline void _pgd_free(pgd_t *pgd) |
@@ -424,7 +408,8 @@ void __init pgd_cache_init(void) | |||
424 | 408 | ||
425 | static inline pgd_t *_pgd_alloc(void) | 409 | static inline pgd_t *_pgd_alloc(void) |
426 | { | 410 | { |
427 | return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); | 411 | return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, |
412 | PGD_ALLOCATION_ORDER); | ||
428 | } | 413 | } |
429 | 414 | ||
430 | static inline void _pgd_free(pgd_t *pgd) | 415 | static inline void _pgd_free(pgd_t *pgd) |
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 948714c1535a..8476175c07e7 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h | |||
@@ -1,13 +1,112 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __ASM_GENERIC_PGALLOC_H | 2 | #ifndef __ASM_GENERIC_PGALLOC_H |
3 | #define __ASM_GENERIC_PGALLOC_H | 3 | #define __ASM_GENERIC_PGALLOC_H |
4 | /* | 4 | |
5 | * an empty file is enough for a nommu architecture | ||
6 | */ | ||
7 | #ifdef CONFIG_MMU | 5 | #ifdef CONFIG_MMU |
8 | #error need to implement an architecture specific asm/pgalloc.h | 6 | |
7 | #define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) | ||
8 | #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) | ||
9 | |||
10 | /** | ||
11 | * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table | ||
12 | * @mm: the mm_struct of the current context | ||
13 | * | ||
14 | * This function is intended for architectures that need | ||
15 | * anything beyond simple page allocation. | ||
16 | * | ||
17 | * Return: pointer to the allocated memory or %NULL on error | ||
18 | */ | ||
19 | static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) | ||
20 | { | ||
21 | return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL); | ||
22 | } | ||
23 | |||
24 | #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL | ||
25 | /** | ||
26 | * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table | ||
27 | * @mm: the mm_struct of the current context | ||
28 | * | ||
29 | * Return: pointer to the allocated memory or %NULL on error | ||
30 | */ | ||
31 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) | ||
32 | { | ||
33 | return __pte_alloc_one_kernel(mm); | ||
34 | } | ||
35 | #endif | ||
36 | |||
37 | /** | ||
38 | * pte_free_kernel - free PTE-level kernel page table page | ||
39 | * @mm: the mm_struct of the current context | ||
40 | * @pte: pointer to the memory containing the page table | ||
41 | */ | ||
42 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
43 | { | ||
44 | free_page((unsigned long)pte); | ||
45 | } | ||
46 | |||
47 | /** | ||
48 | * __pte_alloc_one - allocate a page for PTE-level user page table | ||
49 | * @mm: the mm_struct of the current context | ||
50 | * @gfp: GFP flags to use for the allocation | ||
51 | * | ||
52 | * Allocates a page and runs the pgtable_page_ctor(). | ||
53 | * | ||
54 | * This function is intended for architectures that need | ||
55 | * anything beyond simple page allocation or must have custom GFP flags. | ||
56 | * | ||
57 | * Return: `struct page` initialized as page table or %NULL on error | ||
58 | */ | ||
59 | static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) | ||
60 | { | ||
61 | struct page *pte; | ||
62 | |||
63 | pte = alloc_page(gfp); | ||
64 | if (!pte) | ||
65 | return NULL; | ||
66 | if (!pgtable_page_ctor(pte)) { | ||
67 | __free_page(pte); | ||
68 | return NULL; | ||
69 | } | ||
70 | |||
71 | return pte; | ||
72 | } | ||
73 | |||
74 | #ifndef __HAVE_ARCH_PTE_ALLOC_ONE | ||
75 | /** | ||
76 | * pte_alloc_one - allocate a page for PTE-level user page table | ||
77 | * @mm: the mm_struct of the current context | ||
78 | * | ||
79 | * Allocates a page and runs the pgtable_page_ctor(). | ||
80 | * | ||
81 | * Return: `struct page` initialized as page table or %NULL on error | ||
82 | */ | ||
83 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm) | ||
84 | { | ||
85 | return __pte_alloc_one(mm, GFP_PGTABLE_USER); | ||
86 | } | ||
9 | #endif | 87 | #endif |
10 | 88 | ||
89 | /* | ||
90 | * Should really implement gc for free page table pages. This could be | ||
91 | * done with a reference count in struct page. | ||
92 | */ | ||
93 | |||
94 | /** | ||
95 | * pte_free - free PTE-level user page table page | ||
96 | * @mm: the mm_struct of the current context | ||
97 | * @pte_page: the `struct page` representing the page table | ||
98 | */ | ||
99 | static inline void pte_free(struct mm_struct *mm, struct page *pte_page) | ||
100 | { | ||
101 | pgtable_page_dtor(pte_page); | ||
102 | __free_page(pte_page); | ||
103 | } | ||
104 | |||
105 | #else /* CONFIG_MMU */ | ||
106 | |||
107 | /* This is enough for a nommu architecture */ | ||
11 | #define check_pgt_cache() do { } while (0) | 108 | #define check_pgt_cache() do { } while (0) |
12 | 109 | ||
110 | #endif /* CONFIG_MMU */ | ||
111 | |||
13 | #endif /* __ASM_GENERIC_PGALLOC_H */ | 112 | #endif /* __ASM_GENERIC_PGALLOC_H */ |