 arch/ia64/include/asm/tlb.h | 66 ++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 46 insertions(+), 20 deletions(-)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 23cce999eb1c..c3ffe3e54edc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define IA64_GATHER_BUNDLE	8
+
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
 };
 
 struct ia64_tr_entry {
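The hunk above replaces the fixed pages[FREE_PTE_NR] array with a hybrid scheme: pages points either at the small on-stack local bundle (IA64_GATHER_BUNDLE, i.e. 8 entries) or at a single dynamically allocated page of pointers, with max recording whichever capacity is current. A minimal sketch of the ownership test the later hunks rely on (the helper name is illustrative, not part of the patch):

/* Illustrative helper, not in the patch: the on-stack bundle is in
 * use exactly while tlb->pages still points at tlb->local, so only
 * the allocated case ever needs freeing.
 */
static inline int tlb_uses_local_bundle(struct mmu_gather *tlb)
{
	return tlb->pages == tlb->local;
}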
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
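Dropping the per-cpu mmu_gathers storage means callers now supply the gather structure themselves, normally on their own stack. A hypothetical caller under the reworked calling convention introduced in the hunks below (example_unmap and its body are illustrative only):

/* Hypothetical caller sketch, assuming the reworked API below: the
 * mmu_gather lives on the caller's stack, so there is no
 * get_cpu_var()/put_cpu_var() pairing to manage.
 */
static void example_unmap(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* 0 => not a full-mm flush */
	/* ... unmap PTEs, queueing pages via tlb_remove_page(&tlb, page) ... */
	tlb_finish_mmu(&tlb, start, end);
}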
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
-	return tlb;
 }
 
 /*
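Note that __tlb_alloc_page() is best-effort: GFP_NOWAIT | __GFP_NOWARN may fail under memory pressure, in which case the gather simply keeps the eight-entry on-stack bundle and flushes more often. When it succeeds, the batch holds PAGE_SIZE / sizeof(void *) pointers; with ia64's common 16 KB page configuration and 8-byte pointers that is 16384 / 8 = 2048 entries, matching the batch size the removed FREE_PTE_NR constant provided on SMP.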
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
  * collected.
  */
 static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline void
-tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-		return;
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
 	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /*