author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-04-10 03:09:37 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-04-12 14:09:38 -0400
commit		a741e67969577163a4cfc78d7fd2753219087ef1 (patch)
tree		bac4162aaf15367e896429afa60465e201c9204c	/include/asm-powerpc/pgtable.h
parent		e4ee3891db35aa9a069bb403c2a66a8fbfa274d6 (diff)
[POWERPC] Make tlb flush batch use lazy MMU mode
The current tlb flush code on powerpc 64 bits has a subtle race: since we have lost the page table lock, new PTEs can be faulted in after a previous one has been removed but before the corresponding hash entry has been evicted, which can lead to all sorts of fatal problems.

This patch reworks the batch code completely. It doesn't use the mmu_gather stuff anymore. Instead, we use the lazy mmu hooks that were added by the paravirt code. They have the nice property that the enter/leave lazy mmu mode pair is always fully contained by the PTE lock for a given range of PTEs. Thus we can guarantee that all batches are flushed on a given CPU before it drops that lock.

We also generalize batching to any PTE update that requires a flush.

Batching is now enabled on a CPU by arch_enter_lazy_mmu_mode() and disabled by arch_leave_lazy_mmu_mode(). The code expects that this is always contained within a PTE lock section, so no preemption can happen and no PTE can be inserted in that range from another CPU. When batching is enabled on a CPU, every PTE update that needs a hash flush will use the batch for that flush.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
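[Editor's note] The locking pattern the message relies on (enter/leave lazy mmu mode bracketed by the PTE lock) is not visible in the header diff below, so here is a minimal sketch of it from the generic mm side. The walker example_clear_range() is hypothetical and only illustrates the ordering this patch assumes; the locking and lazy-MMU calls are the standard kernel interfaces of this era.

#include <linux/mm.h>

/*
 * Illustrative sketch only, not part of this patch: shows the
 * lock -> arch_enter_lazy_mmu_mode() -> PTE updates ->
 * arch_leave_lazy_mmu_mode() -> unlock ordering the batching
 * code depends on.  The walker itself is hypothetical.
 */
static void example_clear_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	arch_enter_lazy_mmu_mode();	/* batching enabled on this CPU */
	do {
		/* Any update hitting a hashed PTE is queued, not flushed. */
		ptep_get_and_clear(mm, addr, pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();	/* pending hash flushes pushed out */

	pte_unmap_unlock(pte - 1, ptl);
}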
Diffstat (limited to 'include/asm-powerpc/pgtable.h')
-rw-r--r--	include/asm-powerpc/pgtable.h	50
1 file changed, 16 insertions(+), 34 deletions(-)
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 10f52743f4f..c7142c7e0e0 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -272,7 +272,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }

 /* Atomic PTE updates */
-static inline unsigned long pte_update(pte_t *p, unsigned long clr)
+static inline unsigned long pte_update(struct mm_struct *mm,
+				       unsigned long addr,
+				       pte_t *ptep, unsigned long clr,
+				       int huge)
 {
 	unsigned long old, tmp;

@@ -283,20 +286,15 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr)
 	andc	%1,%0,%4 \n\
 	stdcx.	%1,0,%3 \n\
 	bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
+	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
 	: "cc" );
+
+	if (old & _PAGE_HASHPTE)
+		hpte_need_flush(mm, addr, ptep, old, huge);
 	return old;
 }

-/* PTE updating functions, this function puts the PTE in the
- * batch, doesn't actually triggers the hash flush immediately,
- * you need to call flush_tlb_pending() to do that.
- * Pass -1 for "normal" size (4K or 64K)
- */
-extern void hpte_update(struct mm_struct *mm, unsigned long addr,
-			pte_t *ptep, unsigned long pte, int huge);
-
 static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 					      unsigned long addr, pte_t *ptep)
 {
@@ -304,11 +302,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,

 	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pte_update(ptep, _PAGE_ACCESSED);
-	if (old & _PAGE_HASHPTE) {
-		hpte_update(mm, addr, ptep, old, 0);
-		flush_tlb_pending();
-	}
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -331,9 +325,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,

 	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
 		return 0;
-	old = pte_update(ptep, _PAGE_DIRTY);
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
 	return (old & _PAGE_DIRTY) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
@@ -352,9 +344,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,

 	if ((pte_val(*ptep) & _PAGE_RW) == 0)
 		return;
-	old = pte_update(ptep, _PAGE_RW);
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
 }

 /*
@@ -378,7 +368,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 ({									\
 	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
 						  __ptep);		\
-	flush_tlb_page(__vma, __address);				\
 	__dirty;							\
 })

@@ -386,20 +375,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(ptep, ~0UL);
-
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
 	return __pte(old);
 }

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t * ptep)
 {
-	unsigned long old = pte_update(ptep, ~0UL);
-
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	pte_update(mm, addr, ptep, ~0UL, 0);
 }

 /*
@@ -408,10 +391,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_present(*ptep)) {
+	if (pte_present(*ptep))
 		pte_clear(mm, addr, ptep);
-		flush_tlb_pending();
-	}
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	*ptep = pte;
 }
@@ -522,6 +503,7 @@ void pgtable_cache_init(void);
 	return pt;
 }

+
 #include <asm-generic/pgtable.h>

 #endif /* __ASSEMBLY__ */
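[Editor's note] The flush side that the new pte_update() calls into, hpte_need_flush(), is added elsewhere in this patch (under arch/powerpc/mm/) and does not appear in this header. The sketch below only illustrates the behaviour the commit message describes, queue while a batch is open and flush immediately otherwise; the structure and field names (example_tlb_batch, active, nr) are assumptions for illustration, not the patch's actual code.

#include <linux/percpu.h>
#include <linux/mm.h>

/* Illustration only, not the code added by this patch. */
struct example_tlb_batch {
	int		active;	/* set by arch_enter_lazy_mmu_mode() */
	unsigned long	nr;	/* number of queued (vaddr, pte) pairs */
};

static DEFINE_PER_CPU(struct example_tlb_batch, example_batch);

void example_hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, unsigned long pte, int huge)
{
	struct example_tlb_batch *batch = &__get_cpu_var(example_batch);

	if (!batch->active) {
		/* No batch open: evict this hash PTE right away. */
		return;
	}

	/*
	 * A batch is open, meaning we are inside a PTE-lock section:
	 * just queue the entry.  arch_leave_lazy_mmu_mode() flushes the
	 * whole batch before that lock can be dropped.
	 */
	batch->nr++;
}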