Diffstat (limited to 'arch/arm64/include/asm/tlb.h')
-rw-r--r--	arch/arm64/include/asm/tlb.h | 136
1 file changed, 20 insertions(+), 116 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 717031a762c2..72cadf52ca80 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -19,115 +19,44 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
-#include <linux/pagemap.h>
-#include <linux/swap.h>
 
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define MMU_GATHER_BUNDLE	8
-
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		fullmm;
-	struct vm_area_struct	*vma;
-	unsigned long		start, end;
-	unsigned long		range_start;
-	unsigned long		range_end;
-	unsigned int		nr;
-	unsigned int		max;
-	struct page		**pages;
-	struct page		*local[MMU_GATHER_BUNDLE];
-};
+#include <asm-generic/tlb.h>
 
 /*
- * This is unnecessarily complex.  There's three ways the TLB shootdown
- * code is used:
+ * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
  *    tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *    tlb->vma will be non-NULL.
  * 2. Unmapping all vmas.  See exit_mmap().
  *    tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *    tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *    Page tables will be freed.
  * 3. Unmapping argument pages.  See shift_arg_pages().
  *    tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- *    tlb->vma will be NULL.
  */
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	if (tlb->fullmm || !tlb->vma)
+	if (tlb->fullmm) {
 		flush_tlb_mm(tlb->mm);
-	else if (tlb->range_end > 0) {
-		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
+	} else if (tlb->end > 0) {
+		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+		flush_tlb_range(&vma, tlb->start, tlb->end);
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
 	}
 }
 
 static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 {
 	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-	if (addr) {
-		tlb->pages = (void *)addr;
-		tlb->max = PAGE_SIZE / sizeof(struct page *);
+		tlb->start = min(tlb->start, addr);
+		tlb->end = max(tlb->end, addr + PAGE_SIZE);
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush(tlb);
-	free_pages_and_swap_cache(tlb->pages, tlb->nr);
-	tlb->nr = 0;
-	if (tlb->pages == tlb->local)
-		__tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->fullmm = !(start | (end+1));
-	tlb->start = start;
-	tlb->end = end;
-	tlb->vma = NULL;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->pages = tlb->local;
-	tlb->nr = 0;
-	__tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->pages != tlb->local)
-		free_pages((unsigned long)tlb->pages, 0);
-}
-
 /*
  * Memorize the range for the TLB flush.
  */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+					  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
 }
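
The new tlb_add_flush() above collapses the old pair of comparisons into min()/max() updates on the generic gather's start/end fields, where an "empty" range is encoded as start = TASK_SIZE, end = 0. A minimal userspace sketch of that accumulation; PAGE_SIZE, TASK_SIZE and the mock_* names are illustrative stand-ins, not the kernel's:

/*
 * Mock of the range accumulation in the new tlb_add_flush().
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE	(1UL << 39)

#define min(a, b)	((a) < (b) ? (a) : (b))
#define max(a, b)	((a) > (b) ? (a) : (b))

struct mock_gather {
	unsigned long start;	/* lowest page address queued for flush */
	unsigned long end;	/* one byte past the highest queued page */
};

/* Grow the pending flush range to cover addr, as tlb_add_flush() does. */
static void mock_add_flush(struct mock_gather *tlb, unsigned long addr)
{
	tlb->start = min(tlb->start, addr);
	tlb->end = max(tlb->end, addr + PAGE_SIZE);
}

int main(void)
{
	/* An "empty" range starts above any address and ends at 0. */
	struct mock_gather tlb = { .start = TASK_SIZE, .end = 0 };

	mock_add_flush(&tlb, 0x5000);
	mock_add_flush(&tlb, 0x2000);

	/* tlb_flush() would issue flush_tlb_range(start, end) here. */
	printf("flush range: 0x%lx-0x%lx\n", tlb.start, tlb.end);
	return 0;
}

Two pages at 0x2000 and 0x5000 yield a single flush of 0x2000-0x6000, which is why tlb_flush() no longer needs the per-range bookkeeping of the removed struct fields.
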
@@ -137,38 +66,24 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
  * case where we're doing a full MM flush.  When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_start_vma(struct mmu_gather *tlb,
+				 struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
-		tlb->vma = vma;
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
 	}
 }
 
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_end_vma(struct mmu_gather *tlb,
+			       struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm)
 		tlb_flush(tlb);
 }
 
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-	return tlb->max - tlb->nr;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
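
With tlb->vma gone, tlb_start_vma() only has to reset the flush range to "empty", and tlb_end_vma() flushes whatever accumulated while the VMA was being torn down. A hedged sketch of that per-VMA protocol, again with mocked names and constants rather than kernel ones:

/*
 * Mock of the per-VMA protocol: start_vma empties the range,
 * tlb_add_flush() grows it, end_vma flushes what accumulated.
 */
#include <stdio.h>

#define TASK_SIZE	(1UL << 39)

struct mock_gather {
	unsigned long start, end;
	unsigned int fullmm;
};

static void mock_start_vma(struct mock_gather *tlb)
{
	if (!tlb->fullmm) {	/* full-MM teardown skips range tracking */
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static void mock_end_vma(struct mock_gather *tlb)
{
	/* As in tlb_flush(): only a non-empty range gets flushed. */
	if (!tlb->fullmm && tlb->end > 0)
		printf("flush_tlb_range(0x%lx, 0x%lx)\n",
		       tlb->start, tlb->end);
}

int main(void)
{
	struct mock_gather tlb = { .fullmm = 0 };

	mock_start_vma(&tlb);
	tlb.start = 0x1000;	/* pretend two pages were unmapped here */
	tlb.end = 0x3000;
	mock_end_vma(&tlb);
	return 0;
}
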
@@ -184,16 +99,5 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
-#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)		do { } while (0)
-
-static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
-	tlb_add_flush(tlb, addr);
-}
-
 
 #endif
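
The deleted __tlb_remove_page()/tlb_remove_page() pair batched pages into an array and flushed when it filled; after this change, asm-generic/tlb.h supplies that batching. A userspace mock of the pattern the deleted code implemented by hand; names and sizes are illustrative, not the kernel's:

/*
 * Mock of the batch-and-flush pattern from the removed code.
 */
#include <stdio.h>

#define MMU_GATHER_BUNDLE	8

struct mock_gather {
	unsigned int nr;		/* pages queued so far */
	unsigned int max;		/* batch capacity */
	void *pages[MMU_GATHER_BUNDLE];
};

/* Mirrors the deleted __tlb_remove_page(): returns remaining capacity. */
static int mock_remove_page(struct mock_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->max - tlb->nr;
}

/* Stand-in for tlb_flush_mmu(): flush the TLB, then release the batch. */
static void mock_flush_mmu(struct mock_gather *tlb)
{
	printf("flushing batch of %u pages\n", tlb->nr);
	tlb->nr = 0;
}

int main(void)
{
	struct mock_gather tlb = { .nr = 0, .max = MMU_GATHER_BUNDLE };
	char fake_pages[20];

	/* As in the deleted tlb_remove_page(): flush when the batch fills. */
	for (int i = 0; i < 20; i++)
		if (!mock_remove_page(&tlb, &fake_pages[i]))
			mock_flush_mmu(&tlb);
	if (tlb.nr)
		mock_flush_mmu(&tlb);
	return 0;
}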