Diffstat (limited to 'include/asm-ia64/tlb.h')
-rw-r--r--  include/asm-ia64/tlb.h  |  245
1 files changed, 245 insertions, 0 deletions
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
new file mode 100644
index 000000000000..3a9a6d1be75c
--- /dev/null
+++ b/include/asm-ia64/tlb.h
@@ -0,0 +1,245 @@
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
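/*
 * For illustration only (a sketch, not part of the original interface): a concrete C
 * rendering of the template above.  The iteration is simplified, and lookup_pte() is a
 * hypothetical placeholder for whatever page-table walk the caller actually performs.
 *
 *	struct mmu_gather *tlb;
 *	struct vm_area_struct *vma;
 *	pte_t *ptep;
 *	unsigned long addr;
 *
 *	tlb = tlb_gather_mmu(mm, 0);			// 0 => not a full-mm flush
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		tlb_start_vma(tlb, vma);
 *		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *			ptep = lookup_pte(mm, addr);	// hypothetical helper
 *			if (pte_none(*ptep))
 *				continue;
 *			tlb_remove_tlb_entry(tlb, ptep, addr);
 *			if (pte_present(*ptep))
 *				tlb_remove_page(tlb, pte_page(*ptep));
 *		}
 *		tlb_end_vma(tlb, vma);
 *	}
 *	tlb_finish_mmu(tlb, start, end);
 */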
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif
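/*
 * A minimal sketch (illustration only, not part of this interface) of what the two
 * modes mean when a page is handed to tlb_remove_page():
 *
 *	if (tlb_fast_mode(tlb))
 *		free_page_and_swap_cache(page);	// UP: no other CPU can hold a stale TLB entry
 *	else
 *		tlb->pages[tlb->nr++] = page;	// SMP: defer freeing until after the TLB flush
 *
 * In the batched (SMP) case, at most FREE_PTE_NR pages are collected per gather before
 * tlb_remove_page() forces a flush.
 */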

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		freed;		/* number of pages freed */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}
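/*
 * Worked example of the fallback branch above (assuming REGION_NUMBER() selects one of
 * ia64's eight virtual regions from the top address bits): a 2 GB unmap contained in a
 * single region takes the flush_tlb_range() path, while an unmap spanning 2^40 bytes
 * (1 TB) or more, or one whose start and end-1 land in different regions, falls back to
 * flush_tlb_all().
 */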

/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
140 | * table of "mm" may (and usually is) the currently active page table and even | ||
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;
	tlb->start_addr = ~0UL;
	return tlb;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned long freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	unsigned long rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
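/*
 * Example of the rss clamping in tlb_finish_mmu() above (illustrative numbers only): if
 * tlb->freed is 10 but the mm's rss counter is only 7, just 7 is subtracted, so the
 * counter never goes negative.
 */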

static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
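/*
 * Sizing note (simple arithmetic, not taken from the original source): with
 * FREE_PTE_NR == 2048 and 8-byte pointers, the per-CPU pages[] batch occupies 16 KB;
 * assuming the common 16 KB ia64 page size, one full batch holds pages covering
 * 2048 * 16 KB = 32 MB of address space before the forced flush above kicks in.
 */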

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
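/*
 * Illustration (assumes the unmap walk proceeds in increasing address order, as the
 * generic unmap code does): after entries at A, A + PAGE_SIZE, ..., A + N*PAGE_SIZE have
 * been removed, tlb->start_addr == A and tlb->end_addr == A + (N + 1)*PAGE_SIZE, so a
 * flush forced from tlb_remove_page() covers exactly the range touched so far.
 */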

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep)		\
do {					\
	tlb->need_flush = 1;		\
	__pte_free_tlb(tlb, ptep);	\
} while (0)

#define pmd_free_tlb(tlb, ptep)		\
do {					\
	tlb->need_flush = 1;		\
	__pmd_free_tlb(tlb, ptep);	\
} while (0)

#define pud_free_tlb(tlb, pudp)		\
do {					\
	tlb->need_flush = 1;		\
	__pud_free_tlb(tlb, pudp);	\
} while (0)

#endif /* _ASM_IA64_TLB_H */