Diffstat (limited to 'arch/powerpc/mm/pgtable-frag.c')
-rw-r--r--  arch/powerpc/mm/pgtable-frag.c  119
1 file changed, 119 insertions, 0 deletions
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
new file mode 100644
index 000000000000..af23a587f019
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0

/*
 * Handling Page Tables through page fragments
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

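/*
 * Each page is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes,
 * each big enough to hold one PTE page table. A per-mm cursor kept in
 * mm->context points at the next free fragment; because fragments are
 * power-of-two sized and aligned, the cursor's offset within its page
 * tells how many fragments have already been handed out.
 */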
void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

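	/* with one fragment per page there is no sub-page cache to consult */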
	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

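	/*
	 * One reference for the fragment handed back to the caller; the
	 * rest are taken below if the remainder of the page gets cached.
	 */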
	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find the pgtable page already set, return the newly
	 * allocated page with a single fragment count; otherwise install
	 * it as the per-mm cache and take a reference per fragment.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

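/*
 * Fast path: hand out the next fragment from the per-mm cache, falling
 * back to carving up a freshly allocated page.
 */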
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

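/*
 * Each live fragment holds one reference on its page; the page goes back
 * to the allocator when the last fragment is freed.
 */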
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}
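
For context, here is a minimal sketch of the arch glue that would sit on top of these helpers. The wrapper names (pte_alloc_one, pte_alloc_one_kernel, pte_free, pte_free_kernel) follow the usual pgalloc.h convention; the actual powerpc headers may differ in detail, so treat this as an illustration of the kernel/user split, not the real interface:

/* Hypothetical wrappers, for illustration only. */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	/* user page table: kernel=0, backing page got pgtable_page_ctor() */
	return (pgtable_t)pte_fragment_alloc(mm, addr, 0);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	/* kernel page table: kernel=1, no ctor/dtor and no __GFP_ACCOUNT */
	return pte_fragment_alloc(mm, addr, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

The payoff comes on large-page configurations, where a PTE table is much smaller than a page: handing out PTE_FRAG_NR sub-page fragments per page, instead of a whole page per table, avoids wasting most of each page-table page.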