Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig                          |    8
-rw-r--r--  arch/alpha/Kconfig                    |    1
-rw-r--r--  arch/alpha/include/asm/tlb.h          |    6
-rw-r--r--  arch/arc/include/asm/tlb.h            |   32
-rw-r--r--  arch/arm/include/asm/tlb.h            |  255
-rw-r--r--  arch/arm64/Kconfig                    |    1
-rw-r--r--  arch/arm64/include/asm/tlb.h          |    1
-rw-r--r--  arch/c6x/Kconfig                      |    1
-rw-r--r--  arch/c6x/include/asm/tlb.h            |    2
-rw-r--r--  arch/h8300/include/asm/tlb.h          |    2
-rw-r--r--  arch/hexagon/include/asm/tlb.h        |   12
-rw-r--r--  arch/ia64/include/asm/machvec.h       |   13
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h   |    2
-rw-r--r--  arch/ia64/include/asm/tlb.h           |  259
-rw-r--r--  arch/ia64/include/asm/tlbflush.h      |   25
-rw-r--r--  arch/ia64/mm/tlb.c                    |   23
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c     |    7
-rw-r--r--  arch/m68k/Kconfig                     |    1
-rw-r--r--  arch/m68k/include/asm/tlb.h           |   14
-rw-r--r--  arch/microblaze/Kconfig               |    1
-rw-r--r--  arch/microblaze/include/asm/tlb.h     |    9
-rw-r--r--  arch/mips/include/asm/tlb.h           |   17
-rw-r--r--  arch/nds32/include/asm/tlb.h          |   16
-rw-r--r--  arch/nds32/include/asm/tlbflush.h     |    1
-rw-r--r--  arch/nios2/Kconfig                    |    1
-rw-r--r--  arch/nios2/include/asm/tlb.h          |   14
-rw-r--r--  arch/openrisc/Kconfig                 |    1
-rw-r--r--  arch/openrisc/include/asm/tlb.h       |    8
-rw-r--r--  arch/parisc/include/asm/tlb.h         |   18
-rw-r--r--  arch/powerpc/Kconfig                  |    2
-rw-r--r--  arch/powerpc/include/asm/tlb.h        |   18
-rw-r--r--  arch/riscv/include/asm/tlb.h          |    1
-rw-r--r--  arch/s390/Kconfig                     |    2
-rw-r--r--  arch/s390/include/asm/tlb.h           |  130
-rw-r--r--  arch/s390/mm/pgalloc.c                |   63
-rw-r--r--  arch/sh/include/asm/pgalloc.h         |    9
-rw-r--r--  arch/sh/include/asm/tlb.h             |  132
-rw-r--r--  arch/sparc/Kconfig                    |    1
-rw-r--r--  arch/sparc/include/asm/tlb_32.h       |   18
-rw-r--r--  arch/um/include/asm/tlb.h             |  158
-rw-r--r--  arch/unicore32/Kconfig                |    1
-rw-r--r--  arch/unicore32/include/asm/tlb.h      |    7
-rw-r--r--  arch/x86/Kconfig                      |    1
-rw-r--r--  arch/x86/include/asm/tlb.h            |    1
-rw-r--r--  arch/xtensa/include/asm/tlb.h         |   26
45 files changed, 142 insertions, 1179 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 33687dddd86a..a826843470ed 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -383,7 +383,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
383config HAVE_RCU_TABLE_FREE 383config HAVE_RCU_TABLE_FREE
384 bool 384 bool
385 385
386config HAVE_RCU_TABLE_INVALIDATE 386config HAVE_RCU_TABLE_NO_INVALIDATE
387 bool
388
389config HAVE_MMU_GATHER_PAGE_SIZE
390 bool
391
392config HAVE_MMU_GATHER_NO_GATHER
387 bool 393 bool
388 394
389config ARCH_HAVE_NMI_SAFE_CMPXCHG 395config ARCH_HAVE_NMI_SAFE_CMPXCHG
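The three symbols added here are opt-in switches for the consolidated mmu_gather code: an architecture selects them and <asm-generic/tlb.h> enables or drops the matching machinery. HAVE_RCU_TABLE_NO_INVALIDATE lets an architecture (powerpc, further down) skip the TLB invalidate that is otherwise issued before a batch of freed page tables is released; the other two shape the common gather structure roughly as sketched below (abridged and assumed, not the verbatim generic header):

struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;		/* page-table pages queued for deferred free */
#endif
	unsigned long		start, end;	/* virtual range to flush */
	unsigned int		fullmm : 1;	/* whole address space is going away */
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;	/* size of the pages being unmapped */
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch	*active;	/* batched struct page frees ... */
	struct mmu_gather_batch	local;		/* ... unless the arch frees pages directly */
#endif
};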
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 584a6e114853..c7c976eb6407 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -36,6 +36,7 @@ config ALPHA
36 select ODD_RT_SIGACTION 36 select ODD_RT_SIGACTION
37 select OLD_SIGSUSPEND 37 select OLD_SIGSUSPEND
38 select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 38 select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
39 select MMU_GATHER_NO_RANGE
39 help 40 help
40 The Alpha is a 64-bit general-purpose processor designed and 41 The Alpha is a 64-bit general-purpose processor designed and
41 marketed by the Digital Equipment Corporation of blessed memory, 42 marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index 8f5042b61875..4f79e331af5e 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -2,12 +2,6 @@
2#ifndef _ALPHA_TLB_H 2#ifndef _ALPHA_TLB_H
3#define _ALPHA_TLB_H 3#define _ALPHA_TLB_H
4 4
5#define tlb_start_vma(tlb, vma) do { } while (0)
6#define tlb_end_vma(tlb, vma) do { } while (0)
7#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
8
9#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
10
11#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
12 6
13#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 7#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
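With MMU_GATHER_NO_RANGE selected in the Kconfig hunk above, the boilerplate deleted here is supplied by the generic header: tlb_start_vma()/tlb_end_vma() become no-ops and tlb_flush() degrades to a full-mm flush. The generic fallback looks roughly like this (a sketch; field names as assumed from the common code):

#ifdef CONFIG_MMU_GATHER_NO_RANGE
/* No fine-grained flush primitive: flush the whole mm if anything was unmapped. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}
#endif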
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index a9db5f62aaf3..90cac97643a4 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,38 +9,6 @@
9#ifndef _ASM_ARC_TLB_H 9#ifndef _ASM_ARC_TLB_H
10#define _ASM_ARC_TLB_H 10#define _ASM_ARC_TLB_H
11 11
12#define tlb_flush(tlb) \
13do { \
14 if (tlb->fullmm) \
15 flush_tlb_mm((tlb)->mm); \
16} while (0)
17
18/*
19 * This pair is called at time of munmap/exit to flush cache and TLB entries
20 * for mappings being torn down.
21 * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
22 * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
23 *
24 * Note, read http://lkml.org/lkml/2004/1/15/6
25 */
26#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
27#define tlb_start_vma(tlb, vma)
28#else
29#define tlb_start_vma(tlb, vma) \
30do { \
31 if (!tlb->fullmm) \
32 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
33} while(0)
34#endif
35
36#define tlb_end_vma(tlb, vma) \
37do { \
38 if (!tlb->fullmm) \
39 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
40} while (0)
41
42#define __tlb_remove_tlb_entry(tlb, ptep, address)
43
44#include <linux/pagemap.h> 12#include <linux/pagemap.h>
45#include <asm-generic/tlb.h> 13#include <asm-generic/tlb.h>
46 14
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f854148c8d7c..bc6d04a09899 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,271 +33,42 @@
33#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35 35
36#define MMU_GATHER_BUNDLE 8
37
38#ifdef CONFIG_HAVE_RCU_TABLE_FREE
39static inline void __tlb_remove_table(void *_table) 36static inline void __tlb_remove_table(void *_table)
40{ 37{
41 free_page_and_swap_cache((struct page *)_table); 38 free_page_and_swap_cache((struct page *)_table);
42} 39}
43 40
44struct mmu_table_batch { 41#include <asm-generic/tlb.h>
45 struct rcu_head rcu;
46 unsigned int nr;
47 void *tables[0];
48};
49
50#define MAX_TABLE_BATCH \
51 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
52
53extern void tlb_table_flush(struct mmu_gather *tlb);
54extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
55
56#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
57#else
58#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
59#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
60
61/*
62 * TLB handling. This allows us to remove pages from the page
63 * tables, and efficiently handle the TLB issues.
64 */
65struct mmu_gather {
66 struct mm_struct *mm;
67#ifdef CONFIG_HAVE_RCU_TABLE_FREE
68 struct mmu_table_batch *batch;
69 unsigned int need_flush;
70#endif
71 unsigned int fullmm;
72 struct vm_area_struct *vma;
73 unsigned long start, end;
74 unsigned long range_start;
75 unsigned long range_end;
76 unsigned int nr;
77 unsigned int max;
78 struct page **pages;
79 struct page *local[MMU_GATHER_BUNDLE];
80};
81
82DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
83
84/*
85 * This is unnecessarily complex. There's three ways the TLB shootdown
86 * code is used:
87 * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
88 * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
89 * tlb->vma will be non-NULL.
90 * 2. Unmapping all vmas. See exit_mmap().
91 * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
92 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
93 * 3. Unmapping argument pages. See shift_arg_pages().
94 * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
95 * tlb->vma will be NULL.
96 */
97static inline void tlb_flush(struct mmu_gather *tlb)
98{
99 if (tlb->fullmm || !tlb->vma)
100 flush_tlb_mm(tlb->mm);
101 else if (tlb->range_end > 0) {
102 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
103 tlb->range_start = TASK_SIZE;
104 tlb->range_end = 0;
105 }
106}
107
108static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
109{
110 if (!tlb->fullmm) {
111 if (addr < tlb->range_start)
112 tlb->range_start = addr;
113 if (addr + PAGE_SIZE > tlb->range_end)
114 tlb->range_end = addr + PAGE_SIZE;
115 }
116}
117
118static inline void __tlb_alloc_page(struct mmu_gather *tlb)
119{
120 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
121
122 if (addr) {
123 tlb->pages = (void *)addr;
124 tlb->max = PAGE_SIZE / sizeof(struct page *);
125 }
126}
127
128static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
129{
130 tlb_flush(tlb);
131#ifdef CONFIG_HAVE_RCU_TABLE_FREE
132 tlb_table_flush(tlb);
133#endif
134}
135
136static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
137{
138 free_pages_and_swap_cache(tlb->pages, tlb->nr);
139 tlb->nr = 0;
140 if (tlb->pages == tlb->local)
141 __tlb_alloc_page(tlb);
142}
143
144static inline void tlb_flush_mmu(struct mmu_gather *tlb)
145{
146 tlb_flush_mmu_tlbonly(tlb);
147 tlb_flush_mmu_free(tlb);
148}
149
150static inline void
151arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
152 unsigned long start, unsigned long end)
153{
154 tlb->mm = mm;
155 tlb->fullmm = !(start | (end+1));
156 tlb->start = start;
157 tlb->end = end;
158 tlb->vma = NULL;
159 tlb->max = ARRAY_SIZE(tlb->local);
160 tlb->pages = tlb->local;
161 tlb->nr = 0;
162 __tlb_alloc_page(tlb);
163 42
164#ifdef CONFIG_HAVE_RCU_TABLE_FREE 43#ifndef CONFIG_HAVE_RCU_TABLE_FREE
165 tlb->batch = NULL; 44#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
166#endif 45#endif
167}
168
169static inline void
170arch_tlb_finish_mmu(struct mmu_gather *tlb,
171 unsigned long start, unsigned long end, bool force)
172{
173 if (force) {
174 tlb->range_start = start;
175 tlb->range_end = end;
176 }
177
178 tlb_flush_mmu(tlb);
179
180 /* keep the page table cache within bounds */
181 check_pgt_cache();
182
183 if (tlb->pages != tlb->local)
184 free_pages((unsigned long)tlb->pages, 0);
185}
186
187/*
188 * Memorize the range for the TLB flush.
189 */
190static inline void
191tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
192{
193 tlb_add_flush(tlb, addr);
194}
195
196#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
197 tlb_remove_tlb_entry(tlb, ptep, address)
198/*
199 * In the case of tlb vma handling, we can optimise these away in the
200 * case where we're doing a full MM flush. When we're doing a munmap,
201 * the vmas are adjusted to only cover the region to be torn down.
202 */
203static inline void
204tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
205{
206 if (!tlb->fullmm) {
207 flush_cache_range(vma, vma->vm_start, vma->vm_end);
208 tlb->vma = vma;
209 tlb->range_start = TASK_SIZE;
210 tlb->range_end = 0;
211 }
212}
213 46
214static inline void 47static inline void
215tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) 48__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
216{
217 if (!tlb->fullmm)
218 tlb_flush(tlb);
219}
220
221static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
222{
223 tlb->pages[tlb->nr++] = page;
224 VM_WARN_ON(tlb->nr > tlb->max);
225 if (tlb->nr == tlb->max)
226 return true;
227 return false;
228}
229
230static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
231{
232 if (__tlb_remove_page(tlb, page))
233 tlb_flush_mmu(tlb);
234}
235
236static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
237 struct page *page, int page_size)
238{
239 return __tlb_remove_page(tlb, page);
240}
241
242static inline void tlb_remove_page_size(struct mmu_gather *tlb,
243 struct page *page, int page_size)
244{
245 return tlb_remove_page(tlb, page);
246}
247
248static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
249 unsigned long addr)
250{ 49{
251 pgtable_page_dtor(pte); 50 pgtable_page_dtor(pte);
252 51
253#ifdef CONFIG_ARM_LPAE 52#ifndef CONFIG_ARM_LPAE
254 tlb_add_flush(tlb, addr);
255#else
256 /* 53 /*
257 * With the classic ARM MMU, a pte page has two corresponding pmd 54 * With the classic ARM MMU, a pte page has two corresponding pmd
258 * entries, each covering 1MB. 55 * entries, each covering 1MB.
259 */ 56 */
260 addr &= PMD_MASK; 57 addr = (addr & PMD_MASK) + SZ_1M;
261 tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); 58 __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
262 tlb_add_flush(tlb, addr + SZ_1M);
263#endif 59#endif
264 60
265 tlb_remove_entry(tlb, pte); 61 tlb_remove_table(tlb, pte);
266}
267
268static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
269 unsigned long addr)
270{
271#ifdef CONFIG_ARM_LPAE
272 tlb_add_flush(tlb, addr);
273 tlb_remove_entry(tlb, virt_to_page(pmdp));
274#endif
275} 62}
276 63
277static inline void 64static inline void
278tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) 65__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
279{
280 tlb_add_flush(tlb, addr);
281}
282
283#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
284#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
285#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
286
287#define tlb_migrate_finish(mm) do { } while (0)
288
289#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
290static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
291 unsigned int page_size)
292{ 66{
293} 67#ifdef CONFIG_ARM_LPAE
294 68 struct page *page = virt_to_page(pmdp);
295static inline void tlb_flush_remove_tables(struct mm_struct *mm)
296{
297}
298 69
299static inline void tlb_flush_remove_tables_local(void *arg) 70 tlb_remove_table(tlb, page);
300{ 71#endif
301} 72}
302 73
303#endif /* CONFIG_MMU */ 74#endif /* CONFIG_MMU */
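The private tlb_add_flush() is gone; the converted __pte_free_tlb() instead widens the generic gather range. On classic (non-LPAE) ARM a pte page is mapped by two consecutive pmd entries, each covering 1MB, so the two addresses the old code recorded one page apart are now covered by a single 2*PAGE_SIZE adjustment ending at the next 1MB boundary. The generic helper it calls is roughly (a sketch of the common code, not the exact header text):

/* Grow the range that the eventual TLB flush has to cover. */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end   = max(tlb->end, address + range_size);
}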
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7e34b9eba5de..78d9fafac983 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -149,7 +149,6 @@ config ARM64
149 select HAVE_PERF_USER_STACK_DUMP 149 select HAVE_PERF_USER_STACK_DUMP
150 select HAVE_REGS_AND_STACK_ACCESS_API 150 select HAVE_REGS_AND_STACK_ACCESS_API
151 select HAVE_RCU_TABLE_FREE 151 select HAVE_RCU_TABLE_FREE
152 select HAVE_RCU_TABLE_INVALIDATE
153 select HAVE_RSEQ 152 select HAVE_RSEQ
154 select HAVE_STACKPROTECTOR 153 select HAVE_STACKPROTECTOR
155 select HAVE_SYSCALL_TRACEPOINTS 154 select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6e..37603b5616a5 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table)
27 free_page_and_swap_cache((struct page *)_table); 27 free_page_and_swap_cache((struct page *)_table);
28} 28}
29 29
30#define tlb_flush tlb_flush
30static void tlb_flush(struct mmu_gather *tlb); 31static void tlb_flush(struct mmu_gather *tlb);
31 32
32#include <asm-generic/tlb.h> 33#include <asm-generic/tlb.h>
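The added #define is how an architecture now tells the generic header that it supplies its own flush: asm-generic/tlb.h only emits its default tlb_flush() when the name is not already defined as a macro. The pattern, roughly (generic default abridged and assumed; the real one also carries VM_EXEC/VM_HUGETLB hints in the dummy vma):

/* arch header (arm64, riscv, powerpc, ...): claim the hook */
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

/* generic header: only provide a default when the arch did not */
#ifndef tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm };	/* simplified */

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif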
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index e5cd3c5f8399..3bb75e674161 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -20,6 +20,7 @@ config C6X
20 select GENERIC_CLOCKEVENTS 20 select GENERIC_CLOCKEVENTS
21 select MODULES_USE_ELF_RELA 21 select MODULES_USE_ELF_RELA
22 select ARCH_NO_COHERENT_DMA_MMAP 22 select ARCH_NO_COHERENT_DMA_MMAP
23 select MMU_GATHER_NO_RANGE if MMU
23 24
24config MMU 25config MMU
25 def_bool n 26 def_bool n
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
index 34525dea1356..240ba0febb57 100644
--- a/arch/c6x/include/asm/tlb.h
+++ b/arch/c6x/include/asm/tlb.h
@@ -2,8 +2,6 @@
2#ifndef _ASM_C6X_TLB_H 2#ifndef _ASM_C6X_TLB_H
3#define _ASM_C6X_TLB_H 3#define _ASM_C6X_TLB_H
4 4
5#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
6
7#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
8 6
9#endif /* _ASM_C6X_TLB_H */ 7#endif /* _ASM_C6X_TLB_H */
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
index 98f344279904..d8201ca31206 100644
--- a/arch/h8300/include/asm/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
@@ -2,8 +2,6 @@
2#ifndef __H8300_TLB_H__ 2#ifndef __H8300_TLB_H__
3#define __H8300_TLB_H__ 3#define __H8300_TLB_H__
4 4
5#define tlb_flush(tlb) do { } while (0)
6
7#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
8 6
9#endif 7#endif
diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h
index 2f00772cc08a..f71c4ba83614 100644
--- a/arch/hexagon/include/asm/tlb.h
+++ b/arch/hexagon/include/asm/tlb.h
@@ -22,18 +22,6 @@
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
24 24
25/*
26 * We don't need any special per-pte or per-vma handling...
27 */
28#define tlb_start_vma(tlb, vma) do { } while (0)
29#define tlb_end_vma(tlb, vma) do { } while (0)
30#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
31
32/*
33 * .. because we flush the whole mm when it fills up
34 */
35#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
36
37#include <asm-generic/tlb.h> 25#include <asm-generic/tlb.h>
38 26
39#endif 27#endif
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 5133739966bc..beae261fbcb4 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
30typedef void ia64_mv_send_ipi_t (int, int, int, int); 30typedef void ia64_mv_send_ipi_t (int, int, int, int);
31typedef void ia64_mv_timer_interrupt_t (int, void *); 31typedef void ia64_mv_timer_interrupt_t (int, void *);
32typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long); 32typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
33typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
34typedef u8 ia64_mv_irq_to_vector (int); 33typedef u8 ia64_mv_irq_to_vector (int);
35typedef unsigned int ia64_mv_local_vector_to_irq (u8); 34typedef unsigned int ia64_mv_local_vector_to_irq (u8);
36typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *); 35typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -80,11 +79,6 @@ machvec_noop (void)
80} 79}
81 80
82static inline void 81static inline void
83machvec_noop_mm (struct mm_struct *mm)
84{
85}
86
87static inline void
88machvec_noop_task (struct task_struct *task) 82machvec_noop_task (struct task_struct *task)
89{ 83{
90} 84}
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
96 90
97extern void machvec_setup (char **); 91extern void machvec_setup (char **);
98extern void machvec_timer_interrupt (int, void *); 92extern void machvec_timer_interrupt (int, void *);
99extern void machvec_tlb_migrate_finish (struct mm_struct *);
100 93
101# if defined (CONFIG_IA64_HP_SIM) 94# if defined (CONFIG_IA64_HP_SIM)
102# include <asm/machvec_hpsim.h> 95# include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
124# define platform_send_ipi ia64_mv.send_ipi 117# define platform_send_ipi ia64_mv.send_ipi
125# define platform_timer_interrupt ia64_mv.timer_interrupt 118# define platform_timer_interrupt ia64_mv.timer_interrupt
126# define platform_global_tlb_purge ia64_mv.global_tlb_purge 119# define platform_global_tlb_purge ia64_mv.global_tlb_purge
127# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
128# define platform_dma_init ia64_mv.dma_init 120# define platform_dma_init ia64_mv.dma_init
129# define platform_dma_get_ops ia64_mv.dma_get_ops 121# define platform_dma_get_ops ia64_mv.dma_get_ops
130# define platform_irq_to_vector ia64_mv.irq_to_vector 122# define platform_irq_to_vector ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
167 ia64_mv_send_ipi_t *send_ipi; 159 ia64_mv_send_ipi_t *send_ipi;
168 ia64_mv_timer_interrupt_t *timer_interrupt; 160 ia64_mv_timer_interrupt_t *timer_interrupt;
169 ia64_mv_global_tlb_purge_t *global_tlb_purge; 161 ia64_mv_global_tlb_purge_t *global_tlb_purge;
170 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
171 ia64_mv_dma_init *dma_init; 162 ia64_mv_dma_init *dma_init;
172 ia64_mv_dma_get_ops *dma_get_ops; 163 ia64_mv_dma_get_ops *dma_get_ops;
173 ia64_mv_irq_to_vector *irq_to_vector; 164 ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
206 platform_send_ipi, \ 197 platform_send_ipi, \
207 platform_timer_interrupt, \ 198 platform_timer_interrupt, \
208 platform_global_tlb_purge, \ 199 platform_global_tlb_purge, \
209 platform_tlb_migrate_finish, \
210 platform_dma_init, \ 200 platform_dma_init, \
211 platform_dma_get_ops, \ 201 platform_dma_get_ops, \
212 platform_irq_to_vector, \ 202 platform_irq_to_vector, \
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
270#ifndef platform_global_tlb_purge 260#ifndef platform_global_tlb_purge
271# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ 261# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
272#endif 262#endif
273#ifndef platform_tlb_migrate_finish
274# define platform_tlb_migrate_finish machvec_noop_mm
275#endif
276#ifndef platform_kernel_launch_event 263#ifndef platform_kernel_launch_event
277# define platform_kernel_launch_event machvec_noop 264# define platform_kernel_launch_event machvec_noop
278#endif 265#endif
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index b5153d300289..a243e4fb4877 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
34extern ia64_mv_send_ipi_t sn2_send_IPI; 34extern ia64_mv_send_ipi_t sn2_send_IPI;
35extern ia64_mv_timer_interrupt_t sn_timer_interrupt; 35extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
36extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; 36extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
37extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
38extern ia64_mv_irq_to_vector sn_irq_to_vector; 37extern ia64_mv_irq_to_vector sn_irq_to_vector;
39extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; 38extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
40extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem; 39extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
77#define platform_send_ipi sn2_send_IPI 76#define platform_send_ipi sn2_send_IPI
78#define platform_timer_interrupt sn_timer_interrupt 77#define platform_timer_interrupt sn_timer_interrupt
79#define platform_global_tlb_purge sn2_global_tlb_purge 78#define platform_global_tlb_purge sn2_global_tlb_purge
80#define platform_tlb_migrate_finish sn_tlb_migrate_finish
81#define platform_pci_fixup sn_pci_fixup 79#define platform_pci_fixup sn_pci_fixup
82#define platform_inb __sn_inb 80#define platform_inb __sn_inb
83#define platform_inw __sn_inw 81#define platform_inw __sn_inw
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 516355a774bf..86ec034ba499 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,263 +47,6 @@
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/machvec.h> 48#include <asm/machvec.h>
49 49
50/* 50#include <asm-generic/tlb.h>
51 * If we can't allocate a page to make a big batch of page pointers
52 * to work on, then just handle a few from the on-stack structure.
53 */
54#define IA64_GATHER_BUNDLE 8
55
56struct mmu_gather {
57 struct mm_struct *mm;
58 unsigned int nr;
59 unsigned int max;
60 unsigned char fullmm; /* non-zero means full mm flush */
61 unsigned char need_flush; /* really unmapped some PTEs? */
62 unsigned long start, end;
63 unsigned long start_addr;
64 unsigned long end_addr;
65 struct page **pages;
66 struct page *local[IA64_GATHER_BUNDLE];
67};
68
69struct ia64_tr_entry {
70 u64 ifa;
71 u64 itir;
72 u64 pte;
73 u64 rr;
74}; /*Record for tr entry!*/
75
76extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
77extern void ia64_ptr_entry(u64 target_mask, int slot);
78
79extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
80
81/*
82 region register macros
83*/
84#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
85#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
86#define RR_VE_MASK 0x0000000000000001L
87#define RR_VE_SHIFT 0
88#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
89#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
90#define RR_PS_MASK 0x00000000000000fcL
91#define RR_PS_SHIFT 2
92#define RR_RID_MASK 0x00000000ffffff00L
93#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
94
95static inline void
96ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
97{
98 tlb->need_flush = 0;
99
100 if (tlb->fullmm) {
101 /*
102 * Tearing down the entire address space. This happens both as a result
103 * of exit() and execve(). The latter case necessitates the call to
104 * flush_tlb_mm() here.
105 */
106 flush_tlb_mm(tlb->mm);
107 } else if (unlikely (end - start >= 1024*1024*1024*1024UL
108 || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
109 {
110 /*
111 * If we flush more than a tera-byte or across regions, we're probably
112 * better off just flushing the entire TLB(s). This should be very rare
113 * and is not worth optimizing for.
114 */
115 flush_tlb_all();
116 } else {
117 /*
118 * flush_tlb_range() takes a vma instead of a mm pointer because
119 * some architectures want the vm_flags for ITLB/DTLB flush.
120 */
121 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
122
123 /* flush the address range from the tlb: */
124 flush_tlb_range(&vma, start, end);
125 /* now flush the virt. page-table area mapping the address range: */
126 flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
127 }
128
129}
130
131static inline void
132ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
133{
134 unsigned long i;
135 unsigned int nr;
136
137 /* lastly, release the freed pages */
138 nr = tlb->nr;
139
140 tlb->nr = 0;
141 tlb->start_addr = ~0UL;
142 for (i = 0; i < nr; ++i)
143 free_page_and_swap_cache(tlb->pages[i]);
144}
145
146/*
147 * Flush the TLB for address range START to END and, if not in fast mode, release the
148 * freed pages that where gathered up to this point.
149 */
150static inline void
151ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
152{
153 if (!tlb->need_flush)
154 return;
155 ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
156 ia64_tlb_flush_mmu_free(tlb);
157}
158
159static inline void __tlb_alloc_page(struct mmu_gather *tlb)
160{
161 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
162
163 if (addr) {
164 tlb->pages = (void *)addr;
165 tlb->max = PAGE_SIZE / sizeof(void *);
166 }
167}
168
169
170static inline void
171arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
172 unsigned long start, unsigned long end)
173{
174 tlb->mm = mm;
175 tlb->max = ARRAY_SIZE(tlb->local);
176 tlb->pages = tlb->local;
177 tlb->nr = 0;
178 tlb->fullmm = !(start | (end+1));
179 tlb->start = start;
180 tlb->end = end;
181 tlb->start_addr = ~0UL;
182}
183
184/*
185 * Called at the end of the shootdown operation to free up any resources that were
186 * collected.
187 */
188static inline void
189arch_tlb_finish_mmu(struct mmu_gather *tlb,
190 unsigned long start, unsigned long end, bool force)
191{
192 if (force)
193 tlb->need_flush = 1;
194 /*
195 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
196 * tlb->end_addr.
197 */
198 ia64_tlb_flush_mmu(tlb, start, end);
199
200 /* keep the page table cache within bounds */
201 check_pgt_cache();
202
203 if (tlb->pages != tlb->local)
204 free_pages((unsigned long)tlb->pages, 0);
205}
206
207/*
208 * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
209 * must be delayed until after the TLB has been flushed (see comments at the beginning of
210 * this file).
211 */
212static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
213{
214 tlb->need_flush = 1;
215
216 if (!tlb->nr && tlb->pages == tlb->local)
217 __tlb_alloc_page(tlb);
218
219 tlb->pages[tlb->nr++] = page;
220 VM_WARN_ON(tlb->nr > tlb->max);
221 if (tlb->nr == tlb->max)
222 return true;
223 return false;
224}
225
226static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
227{
228 ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
229}
230
231static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
232{
233 ia64_tlb_flush_mmu_free(tlb);
234}
235
236static inline void tlb_flush_mmu(struct mmu_gather *tlb)
237{
238 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
239}
240
241static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
242{
243 if (__tlb_remove_page(tlb, page))
244 tlb_flush_mmu(tlb);
245}
246
247static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
248 struct page *page, int page_size)
249{
250 return __tlb_remove_page(tlb, page);
251}
252
253static inline void tlb_remove_page_size(struct mmu_gather *tlb,
254 struct page *page, int page_size)
255{
256 return tlb_remove_page(tlb, page);
257}
258
259/*
260 * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
261 * PTE, not just those pointing to (normal) physical memory.
262 */
263static inline void
264__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
265{
266 if (tlb->start_addr == ~0UL)
267 tlb->start_addr = address;
268 tlb->end_addr = address + PAGE_SIZE;
269}
270
271#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
272
273#define tlb_start_vma(tlb, vma) do { } while (0)
274#define tlb_end_vma(tlb, vma) do { } while (0)
275
276#define tlb_remove_tlb_entry(tlb, ptep, addr) \
277do { \
278 tlb->need_flush = 1; \
279 __tlb_remove_tlb_entry(tlb, ptep, addr); \
280} while (0)
281
282#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
283 tlb_remove_tlb_entry(tlb, ptep, address)
284
285#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
286static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
287 unsigned int page_size)
288{
289}
290
291#define pte_free_tlb(tlb, ptep, address) \
292do { \
293 tlb->need_flush = 1; \
294 __pte_free_tlb(tlb, ptep, address); \
295} while (0)
296
297#define pmd_free_tlb(tlb, ptep, address) \
298do { \
299 tlb->need_flush = 1; \
300 __pmd_free_tlb(tlb, ptep, address); \
301} while (0)
302
303#define pud_free_tlb(tlb, pudp, address) \
304do { \
305 tlb->need_flush = 1; \
306 __pud_free_tlb(tlb, pudp, address); \
307} while (0)
308 51
309#endif /* _ASM_IA64_TLB_H */ 52#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
index 25e280810f6c..ceac10c4d6e2 100644
--- a/arch/ia64/include/asm/tlbflush.h
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -14,6 +14,31 @@
14#include <asm/mmu_context.h> 14#include <asm/mmu_context.h>
15#include <asm/page.h> 15#include <asm/page.h>
16 16
17struct ia64_tr_entry {
18 u64 ifa;
19 u64 itir;
20 u64 pte;
21 u64 rr;
22}; /*Record for tr entry!*/
23
24extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
25extern void ia64_ptr_entry(u64 target_mask, int slot);
26extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
27
28/*
29 region register macros
30*/
31#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
32#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
33#define RR_VE_MASK 0x0000000000000001L
34#define RR_VE_SHIFT 0
35#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
36#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
37#define RR_PS_MASK 0x00000000000000fcL
38#define RR_PS_SHIFT 2
39#define RR_RID_MASK 0x00000000ffffff00L
40#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
41
17/* 42/*
18 * Now for some TLB flushing routines. This is the kind of stuff that 43 * Now for some TLB flushing routines. This is the kind of stuff that
19 * can be very expensive, so try to avoid them whenever possible. 44 * can be very expensive, so try to avoid them whenever possible.
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 5fc89aabdce1..5158bd28de05 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -305,8 +305,8 @@ local_flush_tlb_all (void)
305 ia64_srlz_i(); /* srlz.i implies srlz.d */ 305 ia64_srlz_i(); /* srlz.i implies srlz.d */
306} 306}
307 307
308void 308static void
309flush_tlb_range (struct vm_area_struct *vma, unsigned long start, 309__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
310 unsigned long end) 310 unsigned long end)
311{ 311{
312 struct mm_struct *mm = vma->vm_mm; 312 struct mm_struct *mm = vma->vm_mm;
@@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
343 preempt_enable(); 343 preempt_enable();
344 ia64_srlz_i(); /* srlz.i implies srlz.d */ 344 ia64_srlz_i(); /* srlz.i implies srlz.d */
345} 345}
346
347void flush_tlb_range(struct vm_area_struct *vma,
348 unsigned long start, unsigned long end)
349{
350 if (unlikely(end - start >= 1024*1024*1024*1024UL
351 || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
352 /*
353 * If we flush more than a tera-byte or across regions, we're
354 * probably better off just flushing the entire TLB(s). This
355 * should be very rare and is not worth optimizing for.
356 */
357 flush_tlb_all();
358 } else {
359 /* flush the address range from the tlb */
360 __flush_tlb_range(vma, start, end);
361 /* flush the virt. page-table area mapping the addr range */
362 __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
363 }
364}
346EXPORT_SYMBOL(flush_tlb_range); 365EXPORT_SYMBOL(flush_tlb_range);
347 366
348void ia64_tlb_init(void) 367void ia64_tlb_init(void)
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index b73b0ebf8214..b510f4f17fd4 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
120 cpu_relax(); 120 cpu_relax();
121} 121}
122 122
123void sn_tlb_migrate_finish(struct mm_struct *mm)
124{
125 /* flush_tlb_mm is inefficient if more than 1 users of mm */
126 if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
127 flush_tlb_mm(mm);
128}
129
130static void 123static void
131sn2_ipi_flush_all_tlb(struct mm_struct *mm) 124sn2_ipi_flush_all_tlb(struct mm_struct *mm)
132{ 125{
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b54206408f91..4e37efbc9296 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -28,6 +28,7 @@ config M68K
28 select OLD_SIGSUSPEND3 28 select OLD_SIGSUSPEND3
29 select OLD_SIGACTION 29 select OLD_SIGACTION
30 select ARCH_DISCARD_MEMBLOCK 30 select ARCH_DISCARD_MEMBLOCK
31 select MMU_GATHER_NO_RANGE if MMU
31 32
32config CPU_BIG_ENDIAN 33config CPU_BIG_ENDIAN
33 def_bool y 34 def_bool y
diff --git a/arch/m68k/include/asm/tlb.h b/arch/m68k/include/asm/tlb.h
index b4b9efb6f963..3c81f6adfc8b 100644
--- a/arch/m68k/include/asm/tlb.h
+++ b/arch/m68k/include/asm/tlb.h
@@ -2,20 +2,6 @@
2#ifndef _M68K_TLB_H 2#ifndef _M68K_TLB_H
3#define _M68K_TLB_H 3#define _M68K_TLB_H
4 4
5/*
6 * m68k doesn't need any special per-pte or
7 * per-vma handling..
8 */
9#define tlb_start_vma(tlb, vma) do { } while (0)
10#define tlb_end_vma(tlb, vma) do { } while (0)
11#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
12
13/*
14 * .. because we flush the whole mm when it
15 * fills up.
16 */
17#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
18
19#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
20 6
21#endif /* _M68K_TLB_H */ 7#endif /* _M68K_TLB_H */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a51b965b3b82..321e398ab6b5 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -41,6 +41,7 @@ config MICROBLAZE
41 select TRACING_SUPPORT 41 select TRACING_SUPPORT
42 select VIRT_TO_BUS 42 select VIRT_TO_BUS
43 select CPU_NO_EFFICIENT_FFS 43 select CPU_NO_EFFICIENT_FFS
44 select MMU_GATHER_NO_RANGE if MMU
44 45
45# Endianness selection 46# Endianness selection
46choice 47choice
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index 99b6ded54849..628a78ee0a72 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,16 +11,7 @@
11#ifndef _ASM_MICROBLAZE_TLB_H 11#ifndef _ASM_MICROBLAZE_TLB_H
12#define _ASM_MICROBLAZE_TLB_H 12#define _ASM_MICROBLAZE_TLB_H
13 13
14#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
15
16#include <linux/pagemap.h> 14#include <linux/pagemap.h>
17
18#ifdef CONFIG_MMU
19#define tlb_start_vma(tlb, vma) do { } while (0)
20#define tlb_end_vma(tlb, vma) do { } while (0)
21#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
22#endif
23
24#include <asm-generic/tlb.h> 15#include <asm-generic/tlb.h>
25 16
26#endif /* _ASM_MICROBLAZE_TLB_H */ 17#endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index b6823b9e94da..90f3ad76d9e0 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,23 +5,6 @@
5#include <asm/cpu-features.h> 5#include <asm/cpu-features.h>
6#include <asm/mipsregs.h> 6#include <asm/mipsregs.h>
7 7
8/*
9 * MIPS doesn't need any special per-pte or per-vma handling, except
10 * we need to flush cache for area to be unmapped.
11 */
12#define tlb_start_vma(tlb, vma) \
13 do { \
14 if (!tlb->fullmm) \
15 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
16 } while (0)
17#define tlb_end_vma(tlb, vma) do { } while (0)
18#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
19
20/*
21 * .. because we flush the whole mm when it fills up.
22 */
23#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
24
25#define _UNIQUE_ENTRYHI(base, idx) \ 8#define _UNIQUE_ENTRYHI(base, idx) \
26 (((base) + ((idx) << (PAGE_SHIFT + 1))) | \ 9 (((base) + ((idx) << (PAGE_SHIFT + 1))) | \
27 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) 10 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
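The cache flush that MIPS did in its own tlb_start_vma() is not lost: when an architecture leaves the hooks undefined, the generic header performs the same flush_cache_range() on entry to each VMA and flushes the gathered range on exit. Roughly (a sketch; the real code also skips this under MMU_GATHER_NO_RANGE):

#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	tlb_flush_mmu_tlbonly(tlb);	/* flush and reset the per-vma range */
}
#endif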
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
index b35ae5eae3ab..d5ae571c8d30 100644
--- a/arch/nds32/include/asm/tlb.h
+++ b/arch/nds32/include/asm/tlb.h
@@ -4,22 +4,6 @@
4#ifndef __ASMNDS32_TLB_H 4#ifndef __ASMNDS32_TLB_H
5#define __ASMNDS32_TLB_H 5#define __ASMNDS32_TLB_H
6 6
7#define tlb_start_vma(tlb,vma) \
8 do { \
9 if (!tlb->fullmm) \
10 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
11 } while (0)
12
13#define tlb_end_vma(tlb,vma) \
14 do { \
15 if(!tlb->fullmm) \
16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
17 } while (0)
18
19#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
20
21#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
22
23#include <asm-generic/tlb.h> 7#include <asm-generic/tlb.h>
24 8
25#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) 9#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
index 9b411f401903..38ee769b18d8 100644
--- a/arch/nds32/include/asm/tlbflush.h
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
42 42
43void update_mmu_cache(struct vm_area_struct *vma, 43void update_mmu_cache(struct vm_area_struct *vma,
44 unsigned long address, pte_t * pte); 44 unsigned long address, pte_t * pte);
45void tlb_migrate_finish(struct mm_struct *mm);
46 45
47#endif 46#endif
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 4ef15a61b7bc..3633f8144367 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -24,6 +24,7 @@ config NIOS2
24 select USB_ARCH_HAS_HCD if USB_SUPPORT 24 select USB_ARCH_HAS_HCD if USB_SUPPORT
25 select CPU_NO_EFFICIENT_FFS 25 select CPU_NO_EFFICIENT_FFS
26 select ARCH_DISCARD_MEMBLOCK 26 select ARCH_DISCARD_MEMBLOCK
27 select MMU_GATHER_NO_RANGE if MMU
27 28
28config GENERIC_CSUM 29config GENERIC_CSUM
29 def_bool y 30 def_bool y
diff --git a/arch/nios2/include/asm/tlb.h b/arch/nios2/include/asm/tlb.h
index d3bc648e08b5..f9f2e27e32dd 100644
--- a/arch/nios2/include/asm/tlb.h
+++ b/arch/nios2/include/asm/tlb.h
@@ -11,22 +11,12 @@
11#ifndef _ASM_NIOS2_TLB_H 11#ifndef _ASM_NIOS2_TLB_H
12#define _ASM_NIOS2_TLB_H 12#define _ASM_NIOS2_TLB_H
13 13
14#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
15
16extern void set_mmu_pid(unsigned long pid); 14extern void set_mmu_pid(unsigned long pid);
17 15
18/* 16/*
19 * NiosII doesn't need any special per-pte or per-vma handling, except 17 * NIOS32 does have flush_tlb_range(), but it lacks a limit and fallback to
20 * we need to flush cache for the area to be unmapped. 18 * full mm invalidation. So use flush_tlb_mm() for everything.
21 */ 19 */
22#define tlb_start_vma(tlb, vma) \
23 do { \
24 if (!tlb->fullmm) \
25 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
26 } while (0)
27
28#define tlb_end_vma(tlb, vma) do { } while (0)
29#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
30 20
31#include <linux/pagemap.h> 21#include <linux/pagemap.h>
32#include <asm-generic/tlb.h> 22#include <asm-generic/tlb.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a5e361fbb75a..c6cf8a49a0ab 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -36,6 +36,7 @@ config OPENRISC
36 select OMPIC if SMP 36 select OMPIC if SMP
37 select ARCH_WANT_FRAME_POINTERS 37 select ARCH_WANT_FRAME_POINTERS
38 select GENERIC_IRQ_MULTI_HANDLER 38 select GENERIC_IRQ_MULTI_HANDLER
39 select MMU_GATHER_NO_RANGE if MMU
39 40
40config CPU_BIG_ENDIAN 41config CPU_BIG_ENDIAN
41 def_bool y 42 def_bool y
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
index fa4376a4515d..92d8a4209884 100644
--- a/arch/openrisc/include/asm/tlb.h
+++ b/arch/openrisc/include/asm/tlb.h
@@ -20,14 +20,10 @@
20#define __ASM_OPENRISC_TLB_H__ 20#define __ASM_OPENRISC_TLB_H__
21 21
22/* 22/*
23 * or32 doesn't need any special per-pte or 23 * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm()
24 * per-vma handling.. 24 * for everything.
25 */ 25 */
26#define tlb_start_vma(tlb, vma) do { } while (0)
27#define tlb_end_vma(tlb, vma) do { } while (0)
28#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
29 26
30#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
31#include <linux/pagemap.h> 27#include <linux/pagemap.h>
32#include <asm-generic/tlb.h> 28#include <asm-generic/tlb.h>
33 29
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
index 0c881e74d8a6..8c0446b04c9e 100644
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -2,24 +2,6 @@
2#ifndef _PARISC_TLB_H 2#ifndef _PARISC_TLB_H
3#define _PARISC_TLB_H 3#define _PARISC_TLB_H
4 4
5#define tlb_flush(tlb) \
6do { if ((tlb)->fullmm) \
7 flush_tlb_mm((tlb)->mm);\
8} while (0)
9
10#define tlb_start_vma(tlb, vma) \
11do { if (!(tlb)->fullmm) \
12 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
13} while (0)
14
15#define tlb_end_vma(tlb, vma) \
16do { if (!(tlb)->fullmm) \
17 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
18} while (0)
19
20#define __tlb_remove_tlb_entry(tlb, pte, address) \
21 do { } while (0)
22
23#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
24 6
25#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) 7#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2d0be82c3061..8e1e2abf17eb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -218,6 +218,8 @@ config PPC
218 select HAVE_PERF_REGS 218 select HAVE_PERF_REGS
219 select HAVE_PERF_USER_STACK_DUMP 219 select HAVE_PERF_USER_STACK_DUMP
220 select HAVE_RCU_TABLE_FREE if SMP 220 select HAVE_RCU_TABLE_FREE if SMP
221 select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
222 select HAVE_MMU_GATHER_PAGE_SIZE
221 select HAVE_REGS_AND_STACK_ACCESS_API 223 select HAVE_REGS_AND_STACK_ACCESS_API
222 select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN 224 select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
223 select HAVE_SYSCALL_TRACEPOINTS 225 select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e24c67d5ba75..34fba1ce27f7 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,8 +27,8 @@
27#define tlb_start_vma(tlb, vma) do { } while (0) 27#define tlb_start_vma(tlb, vma) do { } while (0)
28#define tlb_end_vma(tlb, vma) do { } while (0) 28#define tlb_end_vma(tlb, vma) do { } while (0)
29#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry 29#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
30#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
31 30
31#define tlb_flush tlb_flush
32extern void tlb_flush(struct mmu_gather *tlb); 32extern void tlb_flush(struct mmu_gather *tlb);
33 33
34/* Get the generic bits... */ 34/* Get the generic bits... */
@@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
46#endif 46#endif
47} 47}
48 48
49static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
50 unsigned int page_size)
51{
52 if (!tlb->page_size)
53 tlb->page_size = page_size;
54 else if (tlb->page_size != page_size) {
55 if (!tlb->fullmm)
56 tlb_flush_mmu(tlb);
57 /*
58 * update the page size after flush for the new
59 * mmu_gather.
60 */
61 tlb->page_size = page_size;
62 }
63}
64
65#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
66static inline int mm_is_core_local(struct mm_struct *mm) 50static inline int mm_is_core_local(struct mm_struct *mm)
67{ 51{
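The removed helper is not lost either: with HAVE_MMU_GATHER_PAGE_SIZE now selected above, the generic tlb_remove_check_page_size_change() keeps the same behaviour, driven by a page_size field in the common struct mmu_gather. Approximately (a sketch of the generic version):

static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm)
			tlb_flush_mmu(tlb);	/* flush before mixing page sizes */
	}
	tlb->page_size = page_size;
#endif
}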
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 439dc7072e05..1ad8d093c58b 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -18,6 +18,7 @@ struct mmu_gather;
18 18
19static void tlb_flush(struct mmu_gather *tlb); 19static void tlb_flush(struct mmu_gather *tlb);
20 20
21#define tlb_flush tlb_flush
21#include <asm-generic/tlb.h> 22#include <asm-generic/tlb.h>
22 23
23static inline void tlb_flush(struct mmu_gather *tlb) 24static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b6e3d0653002..cf06e313e103 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -164,11 +164,13 @@ config S390
164 select HAVE_PERF_USER_STACK_DUMP 164 select HAVE_PERF_USER_STACK_DUMP
165 select HAVE_MEMBLOCK_NODE_MAP 165 select HAVE_MEMBLOCK_NODE_MAP
166 select HAVE_MEMBLOCK_PHYS_MAP 166 select HAVE_MEMBLOCK_PHYS_MAP
167 select HAVE_MMU_GATHER_NO_GATHER
167 select HAVE_MOD_ARCH_SPECIFIC 168 select HAVE_MOD_ARCH_SPECIFIC
168 select HAVE_NOP_MCOUNT 169 select HAVE_NOP_MCOUNT
169 select HAVE_OPROFILE 170 select HAVE_OPROFILE
170 select HAVE_PCI 171 select HAVE_PCI
171 select HAVE_PERF_EVENTS 172 select HAVE_PERF_EVENTS
173 select HAVE_RCU_TABLE_FREE
172 select HAVE_REGS_AND_STACK_ACCESS_API 174 select HAVE_REGS_AND_STACK_ACCESS_API
173 select HAVE_RSEQ 175 select HAVE_RSEQ
174 select HAVE_SYSCALL_TRACEPOINTS 176 select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b31c779cf581..aa406c05a350 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,98 +22,39 @@
22 * Pages used for the page tables is a different story. FIXME: more 22 * Pages used for the page tables is a different story. FIXME: more
23 */ 23 */
24 24
25#include <linux/mm.h> 25void __tlb_remove_table(void *_table);
26#include <linux/pagemap.h> 26static inline void tlb_flush(struct mmu_gather *tlb);
27#include <linux/swap.h> 27static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
28#include <asm/processor.h> 28 struct page *page, int page_size);
29#include <asm/pgalloc.h>
30#include <asm/tlbflush.h>
31
32struct mmu_gather {
33 struct mm_struct *mm;
34 struct mmu_table_batch *batch;
35 unsigned int fullmm;
36 unsigned long start, end;
37};
38
39struct mmu_table_batch {
40 struct rcu_head rcu;
41 unsigned int nr;
42 void *tables[0];
43};
44
45#define MAX_TABLE_BATCH \
46 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
47
48extern void tlb_table_flush(struct mmu_gather *tlb);
49extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
50
51static inline void
52arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
53 unsigned long start, unsigned long end)
54{
55 tlb->mm = mm;
56 tlb->start = start;
57 tlb->end = end;
58 tlb->fullmm = !(start | (end+1));
59 tlb->batch = NULL;
60}
61
62static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
63{
64 __tlb_flush_mm_lazy(tlb->mm);
65}
66
67static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
68{
69 tlb_table_flush(tlb);
70}
71
72 29
73static inline void tlb_flush_mmu(struct mmu_gather *tlb) 30#define tlb_start_vma(tlb, vma) do { } while (0)
74{ 31#define tlb_end_vma(tlb, vma) do { } while (0)
75 tlb_flush_mmu_tlbonly(tlb);
76 tlb_flush_mmu_free(tlb);
77}
78 32
79static inline void 33#define tlb_flush tlb_flush
80arch_tlb_finish_mmu(struct mmu_gather *tlb, 34#define pte_free_tlb pte_free_tlb
81 unsigned long start, unsigned long end, bool force) 35#define pmd_free_tlb pmd_free_tlb
82{ 36#define p4d_free_tlb p4d_free_tlb
83 if (force) { 37#define pud_free_tlb pud_free_tlb
84 tlb->start = start;
85 tlb->end = end;
86 }
87 38
88 tlb_flush_mmu(tlb); 39#include <asm/pgalloc.h>
89} 40#include <asm/tlbflush.h>
41#include <asm-generic/tlb.h>
90 42
91/* 43/*
92 * Release the page cache reference for a pte removed by 44 * Release the page cache reference for a pte removed by
93 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page 45 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
94 * has already been freed, so just do free_page_and_swap_cache. 46 * has already been freed, so just do free_page_and_swap_cache.
95 */ 47 */
96static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
97{
98 free_page_and_swap_cache(page);
99 return false; /* avoid calling tlb_flush_mmu */
100}
101
102static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
103{
104 free_page_and_swap_cache(page);
105}
106
107static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, 48static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
108 struct page *page, int page_size) 49 struct page *page, int page_size)
109{ 50{
110 return __tlb_remove_page(tlb, page); 51 free_page_and_swap_cache(page);
52 return false;
111} 53}
112 54
113static inline void tlb_remove_page_size(struct mmu_gather *tlb, 55static inline void tlb_flush(struct mmu_gather *tlb)
114 struct page *page, int page_size)
115{ 56{
116 return tlb_remove_page(tlb, page); 57 __tlb_flush_mm_lazy(tlb->mm);
117} 58}
118 59
119/* 60/*
@@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
121 * page table from the tlb. 62 * page table from the tlb.
122 */ 63 */
123static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 64static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
124 unsigned long address) 65 unsigned long address)
125{ 66{
67 __tlb_adjust_range(tlb, address, PAGE_SIZE);
68 tlb->mm->context.flush_mm = 1;
69 tlb->freed_tables = 1;
70 tlb->cleared_ptes = 1;
71 /*
72 * page_table_free_rcu takes care of the allocation bit masks
73 * of the 2K table fragments in the 4K page table page,
74 * then calls tlb_remove_table.
75 */
126 page_table_free_rcu(tlb, (unsigned long *) pte, address); 76 page_table_free_rcu(tlb, (unsigned long *) pte, address);
127} 77}
128 78
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
139 if (mm_pmd_folded(tlb->mm)) 89 if (mm_pmd_folded(tlb->mm))
140 return; 90 return;
141 pgtable_pmd_page_dtor(virt_to_page(pmd)); 91 pgtable_pmd_page_dtor(virt_to_page(pmd));
92 __tlb_adjust_range(tlb, address, PAGE_SIZE);
93 tlb->mm->context.flush_mm = 1;
94 tlb->freed_tables = 1;
95 tlb->cleared_puds = 1;
142 tlb_remove_table(tlb, pmd); 96 tlb_remove_table(tlb, pmd);
143} 97}
144 98
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
154{ 108{
155 if (mm_p4d_folded(tlb->mm)) 109 if (mm_p4d_folded(tlb->mm))
156 return; 110 return;
111 __tlb_adjust_range(tlb, address, PAGE_SIZE);
112 tlb->mm->context.flush_mm = 1;
113 tlb->freed_tables = 1;
114 tlb->cleared_p4ds = 1;
157 tlb_remove_table(tlb, p4d); 115 tlb_remove_table(tlb, p4d);
158} 116}
159 117
@@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
169{ 127{
170 if (mm_pud_folded(tlb->mm)) 128 if (mm_pud_folded(tlb->mm))
171 return; 129 return;
130 tlb->mm->context.flush_mm = 1;
131 tlb->freed_tables = 1;
132 tlb->cleared_puds = 1;
172 tlb_remove_table(tlb, pud); 133 tlb_remove_table(tlb, pud);
173} 134}
174 135
175#define tlb_start_vma(tlb, vma) do { } while (0)
176#define tlb_end_vma(tlb, vma) do { } while (0)
177#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
178#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
179#define tlb_migrate_finish(mm) do { } while (0)
180#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
181 tlb_remove_tlb_entry(tlb, ptep, address)
182
183#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
184static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
185 unsigned int page_size)
186{
187}
188 136
189#endif /* _S390_TLB_H */ 137#endif /* _S390_TLB_H */
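The flags the converted s390 helpers set (freed_tables, cleared_ptes/puds/p4ds) are members of the common struct mmu_gather: they tell the final flush that page-table pages were freed and at which levels entries were cleared, so it can pick a suitable invalidation. Abridged sketch of those members (assumed layout):

struct mmu_gather {
	/* ... mm, range and batching state ... */
	unsigned int	freed_tables : 1;	/* page-table pages were freed */
	unsigned int	cleared_ptes : 1;	/* levels at which entries were */
	unsigned int	cleared_pmds : 1;	/* cleared, so the flush can pick */
	unsigned int	cleared_puds : 1;	/* an appropriate granularity */
	unsigned int	cleared_p4ds : 1;
};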
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index db6bb2f97a2c..99e06213a22b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
290 tlb_remove_table(tlb, table); 290 tlb_remove_table(tlb, table);
291} 291}
292 292
293static void __tlb_remove_table(void *_table) 293void __tlb_remove_table(void *_table)
294{ 294{
295 unsigned int mask = (unsigned long) _table & 3; 295 unsigned int mask = (unsigned long) _table & 3;
296 void *table = (void *)((unsigned long) _table ^ mask); 296 void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
316 } 316 }
317} 317}
318 318
319static void tlb_remove_table_smp_sync(void *arg)
320{
321 /* Simply deliver the interrupt */
322}
323
324static void tlb_remove_table_one(void *table)
325{
326 /*
327 * This isn't an RCU grace period and hence the page-tables cannot be
328 * assumed to be actually RCU-freed.
329 *
330 * It is however sufficient for software page-table walkers that rely
331 * on IRQ disabling. See the comment near struct mmu_table_batch.
332 */
333 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
334 __tlb_remove_table(table);
335}
336
337static void tlb_remove_table_rcu(struct rcu_head *head)
338{
339 struct mmu_table_batch *batch;
340 int i;
341
342 batch = container_of(head, struct mmu_table_batch, rcu);
343
344 for (i = 0; i < batch->nr; i++)
345 __tlb_remove_table(batch->tables[i]);
346
347 free_page((unsigned long)batch);
348}
349
350void tlb_table_flush(struct mmu_gather *tlb)
351{
352 struct mmu_table_batch **batch = &tlb->batch;
353
354 if (*batch) {
355 call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
356 *batch = NULL;
357 }
358}
359
360void tlb_remove_table(struct mmu_gather *tlb, void *table)
361{
362 struct mmu_table_batch **batch = &tlb->batch;
363
364 tlb->mm->context.flush_mm = 1;
365 if (*batch == NULL) {
366 *batch = (struct mmu_table_batch *)
367 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
368 if (*batch == NULL) {
369 __tlb_flush_mm_lazy(tlb->mm);
370 tlb_remove_table_one(table);
371 return;
372 }
373 (*batch)->nr = 0;
374 }
375 (*batch)->tables[(*batch)->nr++] = table;
376 if ((*batch)->nr == MAX_TABLE_BATCH)
377 tlb_flush_mmu(tlb);
378}
379
380/* 319/*
381 * Base infrastructure required to generate basic asces, region, segment, 320 * Base infrastructure required to generate basic asces, region, segment,
382 * and page tables that do not make use of enhanced features like EDAT1. 321 * and page tables that do not make use of enhanced features like EDAT1.
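None of the deleted batching logic is s390-specific; an essentially identical copy lives in the common HAVE_RCU_TABLE_FREE code (mm/mmu_gather.c in current trees), which is why selecting HAVE_RCU_TABLE_FREE in the Kconfig hunk lets it all go. What stays per-architecture is only the final free callback, hence __tlb_remove_table() losing its static above. The split, roughly:

/* common code: queue the table and free it after a grace period
 * (or an IPI broadcast when no batch page can be allocated) */
void tlb_remove_table(struct mmu_gather *tlb, void *table);

/* arch code: actually release one queued table */
void __tlb_remove_table(void *table);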
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8ad73cb31121..b56f908b1395 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -70,6 +70,15 @@ do { \
70 tlb_remove_page((tlb), (pte)); \ 70 tlb_remove_page((tlb), (pte)); \
71} while (0) 71} while (0)
72 72
73#if CONFIG_PGTABLE_LEVELS > 2
74#define __pmd_free_tlb(tlb, pmdp, addr) \
75do { \
76 struct page *page = virt_to_page(pmdp); \
77 pgtable_pmd_page_dtor(page); \
78 tlb_remove_page((tlb), page); \
79} while (0);
80#endif
81
73static inline void check_pgt_cache(void) 82static inline void check_pgt_cache(void)
74{ 83{
75 quicklist_trim(QUICK_PT, NULL, 25, 16); 84 quicklist_trim(QUICK_PT, NULL, 25, 16);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 77abe192fb43..bc77f3dd4261 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -11,133 +11,8 @@
11 11
12#ifdef CONFIG_MMU 12#ifdef CONFIG_MMU
13#include <linux/swap.h> 13#include <linux/swap.h>
14#include <asm/pgalloc.h>
15#include <asm/tlbflush.h>
16#include <asm/mmu_context.h>
17 14
18/* 15#include <asm-generic/tlb.h>
19 * TLB handling. This allows us to remove pages from the page
20 * tables, and efficiently handle the TLB issues.
21 */
22struct mmu_gather {
23 struct mm_struct *mm;
24 unsigned int fullmm;
25 unsigned long start, end;
26};
27
28static inline void init_tlb_gather(struct mmu_gather *tlb)
29{
30 tlb->start = TASK_SIZE;
31 tlb->end = 0;
32
33 if (tlb->fullmm) {
34 tlb->start = 0;
35 tlb->end = TASK_SIZE;
36 }
37}
38
39static inline void
40arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
41 unsigned long start, unsigned long end)
42{
43 tlb->mm = mm;
44 tlb->start = start;
45 tlb->end = end;
46 tlb->fullmm = !(start | (end+1));
47
48 init_tlb_gather(tlb);
49}
50
51static inline void
52arch_tlb_finish_mmu(struct mmu_gather *tlb,
53 unsigned long start, unsigned long end, bool force)
54{
55 if (tlb->fullmm || force)
56 flush_tlb_mm(tlb->mm);
57
58 /* keep the page table cache within bounds */
59 check_pgt_cache();
60}
61
62static inline void
63tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
64{
65 if (tlb->start > address)
66 tlb->start = address;
67 if (tlb->end < address + PAGE_SIZE)
68 tlb->end = address + PAGE_SIZE;
69}
70
71#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
72 tlb_remove_tlb_entry(tlb, ptep, address)
73
74/*
75 * In the case of tlb vma handling, we can optimise these away in the
76 * case where we're doing a full MM flush. When we're doing a munmap,
77 * the vmas are adjusted to only cover the region to be torn down.
78 */
79static inline void
80tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
81{
82 if (!tlb->fullmm)
83 flush_cache_range(vma, vma->vm_start, vma->vm_end);
84}
85
86static inline void
87tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
88{
89 if (!tlb->fullmm && tlb->end) {
90 flush_tlb_range(vma, tlb->start, tlb->end);
91 init_tlb_gather(tlb);
92 }
93}
94
95static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
96{
97}
98
99static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
100{
101}
102
103static inline void tlb_flush_mmu(struct mmu_gather *tlb)
104{
105}
106
107static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
108{
109 free_page_and_swap_cache(page);
110 return false; /* avoid calling tlb_flush_mmu */
111}
112
113static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
114{
115 __tlb_remove_page(tlb, page);
116}
117
118static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
119 struct page *page, int page_size)
120{
121 return __tlb_remove_page(tlb, page);
122}
123
124static inline void tlb_remove_page_size(struct mmu_gather *tlb,
125 struct page *page, int page_size)
126{
127 return tlb_remove_page(tlb, page);
128}
129
130#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
131static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
132 unsigned int page_size)
133{
134}
135
136#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
137#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
138#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
139
140#define tlb_migrate_finish(mm) do { } while (0)
141 16
142#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64) 17#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
143extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); 18extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
@@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void)
157 32
158#else /* CONFIG_MMU */ 33#else /* CONFIG_MMU */
159 34
160#define tlb_start_vma(tlb, vma) do { } while (0)
161#define tlb_end_vma(tlb, vma) do { } while (0)
162#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
163#define tlb_flush(tlb) do { } while (0)
164
165#include <asm-generic/tlb.h> 35#include <asm-generic/tlb.h>
166 36
167#endif /* CONFIG_MMU */ 37#endif /* CONFIG_MMU */
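The removed sh implementation tracked the to-be-flushed address window by widening tlb->start/tlb->end for every unmapped PTE; equivalent range tracking is now done by the generic mmu_gather. A rough, hedged user-space sketch of that pattern (all names below are invented for the example, not kernel symbols):

```c
/*
 * User-space sketch of per-PTE flush-range tracking: each removed
 * mapping widens a single [start, end) window that is flushed once.
 */
#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct gather_range {
	unsigned long start;	/* lowest address touched so far */
	unsigned long end;	/* one past the highest address touched */
};

static void range_init(struct gather_range *r)
{
	r->start = ULONG_MAX;	/* "empty" range: start > end (kernel uses TASK_SIZE) */
	r->end = 0;
}

/* Mirror of the removed tlb_remove_tlb_entry(): widen the range per PTE. */
static void range_track_pte(struct gather_range *r, unsigned long address)
{
	if (r->start > address)
		r->start = address;
	if (r->end < address + PAGE_SIZE)
		r->end = address + PAGE_SIZE;
}

int main(void)
{
	struct gather_range r;

	range_init(&r);
	range_track_pte(&r, 0x7000);
	range_track_pte(&r, 0x3000);
	/* Only [0x3000, 0x8000) would need a TLB range flush. */
	printf("flush range: 0x%lx - 0x%lx\n", r.start, r.end);
	return 0;
}
```

A single range flush of [start, end) then replaces one invalidation per page, which is what tlb_end_vma() in the removed code did with flush_tlb_range().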
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 40f8f4f73fe8..db79290ed6d5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -63,6 +63,7 @@ config SPARC64
63 select HAVE_KRETPROBES 63 select HAVE_KRETPROBES
64 select HAVE_KPROBES 64 select HAVE_KPROBES
65 select HAVE_RCU_TABLE_FREE if SMP 65 select HAVE_RCU_TABLE_FREE if SMP
66 select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
66 select HAVE_MEMBLOCK_NODE_MAP 67 select HAVE_MEMBLOCK_NODE_MAP
67 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 68 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
68 select HAVE_DYNAMIC_FTRACE 69 select HAVE_DYNAMIC_FTRACE
diff --git a/arch/sparc/include/asm/tlb_32.h b/arch/sparc/include/asm/tlb_32.h
index 343cea19e573..5cd28a8793e3 100644
--- a/arch/sparc/include/asm/tlb_32.h
+++ b/arch/sparc/include/asm/tlb_32.h
@@ -2,24 +2,6 @@
2#ifndef _SPARC_TLB_H 2#ifndef _SPARC_TLB_H
3#define _SPARC_TLB_H 3#define _SPARC_TLB_H
4 4
5#define tlb_start_vma(tlb, vma) \
6do { \
7 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
8} while (0)
9
10#define tlb_end_vma(tlb, vma) \
11do { \
12 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
13} while (0)
14
15#define __tlb_remove_tlb_entry(tlb, pte, address) \
16 do { } while (0)
17
18#define tlb_flush(tlb) \
19do { \
20 flush_tlb_mm((tlb)->mm); \
21} while (0)
22
23#include <asm-generic/tlb.h> 5#include <asm-generic/tlb.h>
24 6
25#endif /* _SPARC_TLB_H */ 7#endif /* _SPARC_TLB_H */
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index dce6db147f24..70ee60383900 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,162 +2,8 @@
2#ifndef __UM_TLB_H 2#ifndef __UM_TLB_H
3#define __UM_TLB_H 3#define __UM_TLB_H
4 4
5#include <linux/pagemap.h>
6#include <linux/swap.h>
7#include <asm/percpu.h>
8#include <asm/pgalloc.h>
9#include <asm/tlbflush.h> 5#include <asm/tlbflush.h>
10 6#include <asm-generic/cacheflush.h>
11#define tlb_start_vma(tlb, vma) do { } while (0) 7#include <asm-generic/tlb.h>
12#define tlb_end_vma(tlb, vma) do { } while (0)
13#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
14
15/* struct mmu_gather is an opaque type used by the mm code for passing around
16 * any data needed by arch specific code for tlb_remove_page.
17 */
18struct mmu_gather {
19 struct mm_struct *mm;
20 unsigned int need_flush; /* Really unmapped some ptes? */
21 unsigned long start;
22 unsigned long end;
23 unsigned int fullmm; /* non-zero means full mm flush */
24};
25
26static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
27 unsigned long address)
28{
29 if (tlb->start > address)
30 tlb->start = address;
31 if (tlb->end < address + PAGE_SIZE)
32 tlb->end = address + PAGE_SIZE;
33}
34
35static inline void init_tlb_gather(struct mmu_gather *tlb)
36{
37 tlb->need_flush = 0;
38
39 tlb->start = TASK_SIZE;
40 tlb->end = 0;
41
42 if (tlb->fullmm) {
43 tlb->start = 0;
44 tlb->end = TASK_SIZE;
45 }
46}
47
48static inline void
49arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
50 unsigned long start, unsigned long end)
51{
52 tlb->mm = mm;
53 tlb->start = start;
54 tlb->end = end;
55 tlb->fullmm = !(start | (end+1));
56
57 init_tlb_gather(tlb);
58}
59
60extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
61 unsigned long end);
62
63static inline void
64tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
65{
66 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
67}
68
69static inline void
70tlb_flush_mmu_free(struct mmu_gather *tlb)
71{
72 init_tlb_gather(tlb);
73}
74
75static inline void
76tlb_flush_mmu(struct mmu_gather *tlb)
77{
78 if (!tlb->need_flush)
79 return;
80
81 tlb_flush_mmu_tlbonly(tlb);
82 tlb_flush_mmu_free(tlb);
83}
84
85/* arch_tlb_finish_mmu
86 * Called at the end of the shootdown operation to free up any resources
87 * that were required.
88 */
89static inline void
90arch_tlb_finish_mmu(struct mmu_gather *tlb,
91 unsigned long start, unsigned long end, bool force)
92{
93 if (force) {
94 tlb->start = start;
95 tlb->end = end;
96 tlb->need_flush = 1;
97 }
98 tlb_flush_mmu(tlb);
99
100 /* keep the page table cache within bounds */
101 check_pgt_cache();
102}
103
104/* tlb_remove_page
105 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
106 * while handling the additional races in SMP caused by other CPUs
107 * caching valid mappings in their TLBs.
108 */
109static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
110{
111 tlb->need_flush = 1;
112 free_page_and_swap_cache(page);
113 return false; /* avoid calling tlb_flush_mmu */
114}
115
116static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
117{
118 __tlb_remove_page(tlb, page);
119}
120
121static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
122 struct page *page, int page_size)
123{
124 return __tlb_remove_page(tlb, page);
125}
126
127static inline void tlb_remove_page_size(struct mmu_gather *tlb,
128 struct page *page, int page_size)
129{
130 return tlb_remove_page(tlb, page);
131}
132
133/**
134 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
135 *
 136 * Record the fact that pte's were really unmapped in ->need_flush, so we can
137 * later optimise away the tlb invalidate. This helps when userspace is
138 * unmapping already-unmapped pages, which happens quite a lot.
139 */
140#define tlb_remove_tlb_entry(tlb, ptep, address) \
141 do { \
142 tlb->need_flush = 1; \
143 __tlb_remove_tlb_entry(tlb, ptep, address); \
144 } while (0)
145
146#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
147 tlb_remove_tlb_entry(tlb, ptep, address)
148
149#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
150static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
151 unsigned int page_size)
152{
153}
154
155#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
156
157#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
158
159#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
160
161#define tlb_migrate_finish(mm) do {} while (0)
162 8
163#endif 9#endif
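The hand-rolled um mmu_gather deleted above computed its fullmm flag as !(start | (end+1)), which is non-zero exactly when start == 0 and end == ~0UL, i.e. when the whole address space is being torn down. A small hedged user-space check of that idiom (is_fullmm is a made-up helper name):

```c
/*
 * Demo of the fullmm test used by the removed arch_tlb_gather_mmu():
 * !(start | (end + 1)) is true only for start == 0, end == ~0UL.
 */
#include <stdio.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	/* start == 0 and end == ~0UL  <=>  (start | (end + 1)) == 0 */
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", is_fullmm(0, ~0UL));	/* 1: full mm teardown */
	printf("%d\n", is_fullmm(0, 0x7fff));	/* 0: partial range */
	printf("%d\n", is_fullmm(0x1000, ~0UL));	/* 0: partial range */
	return 0;
}
```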
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 817d82608712..d83c8f70900d 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -20,6 +20,7 @@ config UNICORE32
20 select GENERIC_IOMAP 20 select GENERIC_IOMAP
21 select MODULES_USE_ELF_REL 21 select MODULES_USE_ELF_REL
22 select NEED_DMA_MAP_STATE 22 select NEED_DMA_MAP_STATE
23 select MMU_GATHER_NO_RANGE if MMU
23 help 24 help
24 UniCore-32 is 32-bit Instruction Set Architecture, 25 UniCore-32 is 32-bit Instruction Set Architecture,
25 including a series of low-power-consumption RISC chip 26 including a series of low-power-consumption RISC chip
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 9cca15cdae94..00a8477333f6 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -12,10 +12,9 @@
12#ifndef __UNICORE_TLB_H__ 12#ifndef __UNICORE_TLB_H__
13#define __UNICORE_TLB_H__ 13#define __UNICORE_TLB_H__
14 14
15#define tlb_start_vma(tlb, vma) do { } while (0) 15/*
16#define tlb_end_vma(tlb, vma) do { } while (0) 16 * unicore32 lacks an efficient flush_tlb_range(), use flush_tlb_mm().
17#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 17 */
18#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
19 18
20#define __pte_free_tlb(tlb, pte, addr) \ 19#define __pte_free_tlb(tlb, pte, addr) \
21 do { \ 20 do { \
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 62fc3fda1a05..406a0cf30c57 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -183,7 +183,6 @@ config X86
183 select HAVE_PERF_REGS 183 select HAVE_PERF_REGS
184 select HAVE_PERF_USER_STACK_DUMP 184 select HAVE_PERF_USER_STACK_DUMP
185 select HAVE_RCU_TABLE_FREE if PARAVIRT 185 select HAVE_RCU_TABLE_FREE if PARAVIRT
186 select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
187 select HAVE_REGS_AND_STACK_ACCESS_API 186 select HAVE_REGS_AND_STACK_ACCESS_API
188 select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION 187 select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
189 select HAVE_FUNCTION_ARG_ACCESS_API 188 select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 404b8b1d44f5..f23e7aaff4cd 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,6 +6,7 @@
6#define tlb_end_vma(tlb, vma) do { } while (0) 6#define tlb_end_vma(tlb, vma) do { } while (0)
7#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 7#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
8 8
9#define tlb_flush tlb_flush
9static inline void tlb_flush(struct mmu_gather *tlb); 10static inline void tlb_flush(struct mmu_gather *tlb);
10 11
11#include <asm-generic/tlb.h> 12#include <asm-generic/tlb.h>
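The added `#define tlb_flush tlb_flush` line lets the generic header tell, with a plain #ifndef, that x86 supplies its own tlb_flush() and that no default should be emitted. A minimal stand-alone sketch of that self-referential macro idiom; my_flush is an invented name, and the two halves would normally live in separate headers (the arch tlb.h and asm-generic/tlb.h):

```c
/* Self-referential-macro override idiom, modelled in one file. */
#include <stdio.h>

/* --- what the arch header does ---------------------------------------- */
#define my_flush my_flush		/* mark: this arch provides my_flush() */
static void my_flush(void)
{
	puts("arch-specific flush");
}

/* --- what the generic header does ------------------------------------- */
#ifndef my_flush			/* only provide a default if unset */
static void my_flush(void)
{
	puts("generic default flush");
}
#endif

int main(void)
{
	my_flush();	/* prints "arch-specific flush" */
	return 0;
}
```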
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 0d766f9c1083..50889935138a 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -14,32 +14,6 @@
14#include <asm/cache.h> 14#include <asm/cache.h>
15#include <asm/page.h> 15#include <asm/page.h>
16 16
17#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
18
19/* Note, read http://lkml.org/lkml/2004/1/15/6 */
20
21# define tlb_start_vma(tlb,vma) do { } while (0)
22# define tlb_end_vma(tlb,vma) do { } while (0)
23
24#else
25
26# define tlb_start_vma(tlb, vma) \
27 do { \
28 if (!tlb->fullmm) \
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
30 } while(0)
31
32# define tlb_end_vma(tlb, vma) \
33 do { \
34 if (!tlb->fullmm) \
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
36 } while(0)
37
38#endif
39
40#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
41#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
42
43#include <asm-generic/tlb.h> 17#include <asm-generic/tlb.h>
44 18
45#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) 19#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)