author	Peter Zijlstra <peterz@infradead.org>	2018-09-04 11:54:03 -0400
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 04:32:52 -0400
commit	7bb8709d6ad3ceeb5010a98b0d7eb11db8836da1 (patch)
tree	49058851054450050c152e8ce4bbe5c5df3f6834 /arch/um/include
parent	c5b27a889da92f4a969d61df77bd4f79ffce57c9 (diff)
um/tlb: Convert to generic mmu_gather
Generic mmu_gather provides the simple flush_tlb_range() based range
tracking mmu_gather UM needs.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
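[Note: the range tracking deleted below is exactly what the generic mmu_gather now supplies. The generic tlb_remove_tlb_entry() grows tlb->start/tlb->end the same way the removed __tlb_remove_tlb_entry() did, and an architecture that defines no tlb_flush() of its own gets a flush_tlb_range() based fallback from <asm-generic/tlb.h>. A simplified sketch of that fallback's shape, not the verbatim generic code:

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm) {
			/* whole address space went away: flush it all */
			flush_tlb_mm(tlb->mm);
		} else if (tlb->end) {
			/* only the gathered [start, end) range needs flushing */
			struct vm_area_struct vma = { .vm_mm = tlb->mm };

			flush_tlb_range(&vma, tlb->start, tlb->end);
		}
	}

The old header implemented the same idea by hand, calling UM's flush_tlb_mm_range() on the tracked range.]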
Diffstat (limited to 'arch/um/include')
-rw-r--r--	arch/um/include/asm/tlb.h | 156
1 file changed, 2 insertions(+), 154 deletions(-)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 6463f3ab1767..70ee60383900 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,160 +2,8 @@
 #ifndef __UM_TLB_H
 #define __UM_TLB_H
 
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		need_flush; /* Really unmapped some ptes? */
-	unsigned long		start;
-	unsigned long		end;
-	unsigned int		fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-					  unsigned long address)
-{
-	if (tlb->start > address)
-		tlb->start = address;
-	if (tlb->end < address + PAGE_SIZE)
-		tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-	tlb->need_flush = 0;
-
-	tlb->start = TASK_SIZE;
-	tlb->end = 0;
-
-	if (tlb->fullmm) {
-		tlb->start = 0;
-		tlb->end = TASK_SIZE;
-	}
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->fullmm = !(start | (end+1));
-
-	init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-			       unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-	init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	if (!tlb->need_flush)
-		return;
-
-	tlb_flush_mmu_tlbonly(tlb);
-	tlb_flush_mmu_free(tlb);
-}
-
-/* arch_tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		tlb->start = start;
-		tlb->end = end;
-		tlb->need_flush = 1;
-	}
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-}
-
-/* tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- *	while handling the additional races in SMP caused by other CPUs
- *	caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	tlb->need_flush = 1;
-	free_page_and_swap_cache(page);
-	return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	__tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-					  struct page *page, int page_size)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
-{
-	return tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really unmapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.  This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address)		\
-	do {							\
-		tlb->need_flush = 1;				\
-		__tlb_remove_tlb_entry(tlb, ptep, address);	\
-	} while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	tlb_remove_tlb_entry(tlb, ptep, address)
-
-static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
 
 #endif
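[Net result, reconstructed from the hunk above (the file's unchanged first line sits outside the hunk): the header body collapses to the generic includes, matching the diffstat's 2 insertions against 154 deletions:

	#ifndef __UM_TLB_H
	#define __UM_TLB_H

	#include <asm/tlbflush.h>
	#include <asm-generic/cacheflush.h>
	#include <asm-generic/tlb.h>

	#endif

All mmu_gather state and flushing behavior now comes from <asm-generic/tlb.h>.]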