author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:11:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:15 -0400
commit		7a95a2c80748bb91e0bf4b8d58396542e1319d21 (patch)
tree		0b942b8d5fef0667e14f6b74fce840c09938edea /arch/ia64/include
parent		1e56a56410bb64bce62d44563e35a143fc2d515f (diff)
ia64: mmu_gather rework
Fix up the ia64 mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Tony Luck <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
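For orientation, a rough sketch of how a caller drives the reworked interface: the mmu_gather is now supplied by the caller (typically on its stack) rather than handed out from a per-CPU pool, and tlb_gather_mmu() initialises it in place. This is only an illustration under those assumptions; the real call sites live in the generic mm code (e.g. unmap_region()), not in this patch, and example_teardown() is an invented name.

/*
 * Hypothetical caller, illustrating only the call sequence introduced by
 * this rework; not part of the patch.
 */
static void example_teardown(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* caller-owned, no per-CPU mmu_gathers */

	tlb_gather_mmu(&tlb, mm, 0);		/* initialise in place; nothing is returned */
	tlb_start_vma(&tlb, vma);
	/*
	 * For every PTE torn down, the underlying page is handed to the
	 * gather; tlb_remove_page() flushes automatically once the batch
	 * array fills up:
	 *
	 *	tlb_remove_page(&tlb, page);
	 */
	tlb_end_vma(&tlb, vma);
	tlb_finish_mmu(&tlb, start, end);	/* final flush; frees tlb->pages if one was allocated */
}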
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--	arch/ia64/include/asm/tlb.h	66
1 file changed, 46 insertions(+), 20 deletions(-)
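The hunks in the diff below implement a gather-and-flush batching scheme: __tlb_remove_page() returns how many slots are left in the batch, tlb_remove_page() triggers a flush when that reaches zero, and __tlb_alloc_page() opportunistically upgrades from the eight-entry on-stack local[] array to a full page of pointers. A stand-alone user-space sketch of that control flow, with invented names (demo_gather, demo_remove_page) and an arbitrary slot count standing in for PAGE_SIZE / sizeof(void *):

#include <stdlib.h>

#define DEMO_BUNDLE	8		/* plays the role of IA64_GATHER_BUNDLE */
#define DEMO_PAGE_SLOTS	512		/* stands in for PAGE_SIZE / sizeof(void *) */

struct demo_gather {
	unsigned int nr, max;
	void **pages;			/* current batch array */
	void *local[DEMO_BUNDLE];	/* small fallback, like tlb->local[] */
};

void demo_gather_init(struct demo_gather *g)
{
	g->nr = 0;
	g->max = DEMO_BUNDLE;
	g->pages = g->local;		/* start with the on-stack bundle */
}

/* Mirrors __tlb_remove_page(): returns the slots left, 0 meaning "flush now". */
unsigned int demo_remove_page(struct demo_gather *g, void *page)
{
	if (g->nr == 0 && g->pages == g->local) {
		void **big = malloc(DEMO_PAGE_SLOTS * sizeof(void *));
		if (big) {		/* upgrade opportunistically, keep local[] otherwise */
			g->pages = big;
			g->max = DEMO_PAGE_SLOTS;
		}
	}
	g->pages[g->nr++] = page;
	return g->max - g->nr;
}

/* Mirrors tlb_remove_page(): flush (here, just reset) when the batch is full. */
void demo_add(struct demo_gather *g, void *page)
{
	if (!demo_remove_page(g, page))
		g->nr = 0;		/* a real flush would free the gathered pages first */
}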
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 23cce999eb1c..c3ffe3e54edc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define IA64_GATHER_BUNDLE	8
+
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
 };
 
 struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
-	return tlb;
 }
 
 /*
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
  * collected.
  */
 static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline void
-tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-		return;
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
 	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /*