author:    Martin Schwidefsky <schwidefsky@de.ibm.com>  2007-10-22 06:52:44 -0400
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>  2007-10-22 06:52:48 -0400
commit:    ba8a9229ab9e80278c28ad68b15053f65b2b0a7c
tree:      d73e4f7d352d3b3edf8888973528cb7dd3e953f9  /include/asm-s390/pgtable.h
parent:    e3d3683d1402c1737687cb698451d545f57c32a7
[S390] tlb flush fix.
The current TLB flushing code for page table entries violates the
s390 architecture in a small detail. The relevant section from the
Principles of Operation (SA22-7832-02, page 3-47):

  "A valid table entry must not be changed while it is attached to
  any CPU and may be used for translation by that CPU except to
  (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY or
  INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page-table
  entry, or (3) make a change by means of a COMPARE AND SWAP AND
  PURGE instruction that purges the TLB."

That means that if one thread of a multithreaded application uses a vma
while another thread does an unmap on it, the page table entries of that
vma need to be removed with IPTE, IDTE or CSP. In some strange and rare
situations a CPU could check-stop (die) because an entry has been pushed
out of the TLB that is still needed to complete a (milli-coded)
instruction. I have never seen it happen with the current code on any of
the supported machines, so right now this is a theoretical problem. But
I want to fix it nevertheless, to avoid headaches in the future.

To implement this correctly without changing common code, the primitives
ptep_get_and_clear, ptep_get_and_clear_full and ptep_set_wrprotect need
to use the IPTE instruction to invalidate the pte before the new pte
value gets stored. If IPTE were always used for these three primitives,
three important operations would take a performance hit: fork, mprotect
and exit_mmap. Time for some workarounds:

* 1: ptep_get_and_clear_full is used in unmap_vmas to remove page table
  entries in a batched tlb gather operation. If the mmu_gather context
  passed to unmap_vmas has been started with full_mm_flush==1, or if
  only one cpu is online, or if the only user of a mm_struct is the
  current process, then the fullmm indication in the mmu_gather context
  is set to one. All TLBs for the mm_struct are flushed by the
  tlb_gather_mmu call. No new TLBs can be created while the unmap is in
  progress. In this case ptep_get_and_clear_full clears the ptes with a
  simple store.

* 2: ptep_get_and_clear is used in change_protection to clear the ptes
  from the page tables before they are reentered with the new access
  flags. At the end of the update flush_tlb_range clears the remaining
  TLBs. In general ptep_get_and_clear has to issue IPTE for each pte
  and flush_tlb_range is a nop. But if there is only one user of the
  mm_struct, then ptep_get_and_clear uses simple stores to do the
  update and flush_tlb_range will flush the TLBs.

* 3: Similar to 2, ptep_set_wrprotect is used in copy_page_range for a
  fork to make all ptes of a cow mapping read-only. At the end of
  copy_page_range, dup_mmap will flush the TLBs with a call to
  flush_tlb_mm. Check mm->mm_users and, if there is only one user,
  avoid using IPTE in ptep_set_wrprotect and let flush_tlb_mm clear the
  TLBs.

(The single-user test shared by workarounds 2 and 3 is sketched right
after this message.)

Overall, for single-threaded programs the tlb flush code now performs
better; for multithreaded programs it is slightly worse. In particular,
exit_mmap() now does a single IDTE for the mm and then just frees every
page cache reference and every page table page directly, without a
delay over the mmu_gather structure.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
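Editor's note: below is a minimal user-space sketch of the fast-path
decision described above, not part of the patch. struct mm, active_mm,
needs_ipte and simple_store_ok are hypothetical stand-ins that merely
model mm_struct.mm_users, current->active_mm and the two branch
conditions used by the workarounds.

#include <stdio.h>
#include <stdbool.h>

struct mm { int mm_users; };	/* stand-in for mm_struct.mm_users */
static struct mm *active_mm;	/* stand-in for current->active_mm */

/* Workarounds 2 and 3: an IPTE is only required when some other CPU
 * may still translate through this mm, i.e. the mm has more than one
 * user or it is not the mm of the currently running process. */
static bool needs_ipte(struct mm *mm)
{
	return mm->mm_users > 1 || mm != active_mm;
}

/* Workaround 1: with fullmm == 1, tlb_gather_mmu has already flushed
 * all TLBs of the mm and no new ones can be created, so a simple
 * store (pte_clear) is sufficient. */
static bool simple_store_ok(int fullmm)
{
	return fullmm == 1;
}

int main(void)
{
	struct mm multi  = { .mm_users = 2 };
	struct mm single = { .mm_users = 1 };

	active_mm = &single;
	printf("multithreaded mm   -> IPTE needed: %d\n", needs_ipte(&multi));
	printf("single-user mm     -> IPTE needed: %d\n", needs_ipte(&single));
	printf("batched full unmap -> plain store: %d\n", simple_store_ok(1));
	return 0;
}

In the patch itself (the -709/+705 hunk below), the true branch issues
ptep_invalidate/IPTE and the false branch does a plain pte_clear; the
sketch only models which branch is taken.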
Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--  include/asm-s390/pgtable.h  99
1 file changed, 71 insertions(+), 28 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc31..b424ab21f8bd 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -424,7 +424,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
  * within a page table are directly modified. Thus, the following
  * hook is made available.
  */
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *pteptr, pte_t pteval)
 {
 	pte_t *shadow_pte = get_shadow_pte(pteptr);
 
@@ -437,7 +438,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
 	}
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
  * pgd/pmd/pte query functions
@@ -508,7 +508,8 @@ static inline int pte_file(pte_t pte)
 	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
 }
 
+#define __HAVE_ARCH_PTE_SAME
 #define pte_same(a,b)  (pte_val(a) == pte_val(b))
 
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
@@ -663,24 +664,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
 
-static inline int
-ptep_clear_flush_young(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+					 unsigned long address, pte_t *ptep)
 {
 	/* No need to flush TLB; bits are in storage key */
-	return ptep_test_and_clear_young(vma, address, ptep);
-}
-
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	pte_clear(mm, addr, ptep);
-	return pte;
+	return 0;
 }
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +705,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
 	__ptep_ipte(address, ptep);
 }
 
+/*
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ *   1) ptep_get_and_clear
+ *   2) set_pte_at
+ *   3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep)		\
+({								\
+	pte_t __pte = *(__ptep);				\
+	if (atomic_read(&(__mm)->mm_users) > 1 ||		\
+	    (__mm) != current->active_mm)			\
+		ptep_invalidate(__address, __ptep);		\
+	else							\
+		pte_clear((__mm), (__address), (__ptep));	\
+	__pte;							\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
@@ -717,12 +739,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 	return pte;
 }
 
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+/*
+ * The batched pte unmap code uses ptep_get_and_clear_full to clear the
+ * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
+ * tlbs of an mm if it can guarantee that the ptes of the mm_struct
+ * cannot be accessed while the batched unmap is running. In this case
+ * full==1 and a simple pte_clear is enough. See tlb.h.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr,
+					    pte_t *ptep, int full)
 {
-	pte_t old_pte = *ptep;
-	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+	pte_t pte = *ptep;
+
+	if (full)
+		pte_clear(mm, addr, ptep);
+	else
+		ptep_invalidate(addr, ptep);
+	return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define ptep_set_wrprotect(__mm, __addr, __ptep)		\
+({								\
+	pte_t __pte = *(__ptep);				\
+	if (pte_write(__pte)) {					\
+		if (atomic_read(&(__mm)->mm_users) > 1 ||	\
+		    (__mm) != current->active_mm)		\
+			ptep_invalidate(__addr, __ptep);	\
+		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
+	}							\
+})
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
 ({									\
 	int __changed = !pte_same(*(__ptep), __entry);			\
@@ -740,11 +790,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
 static inline int page_test_dirty(struct page *page)
 {
 	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
 }
 
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
 static inline void page_clear_dirty(struct page *page)
 {
 	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
@@ -753,6 +805,7 @@ static inline void page_clear_dirty(struct page *page)
 /*
  * Test and clear referenced bit in storage key.
  */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 static inline int page_test_and_clear_young(struct page *page)
 {
 	unsigned long physpage = page_to_phys(page);
@@ -930,16 +983,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
 #define __HAVE_ARCH_MEMMAP_INIT
 extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
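
Editor's appendix (a compilable model, not part of the commit): the
comment added in the -709/+705 hunk describes the common-code sequence
1) ptep_get_and_clear, 2) set_pte_at, 3) flush_tlb_range used by
change_pte_range. The sketch below uses stand-in types and helpers
(pte_t as unsigned long, PROT_MASK, a four-entry table) to show the
window between steps 1 and 2 in which the pte is gone from the table;
during that window any translation must come from the TLB, which is why
s390 has to flush inside step 1 when the mm is live.

#include <stdio.h>

typedef unsigned long pte_t;	/* stand-in for the kernel's pte_t */
#define PROT_MASK 0x7UL		/* pretend the low bits hold protection */

static pte_t ptep_get_and_clear(pte_t *ptep)
{
	/* On s390 this is where IPTE would purge the TLB entry for a
	 * live mm; the model just reads the old value and clears the
	 * slot, leaving the "pte is empty" window visible. */
	pte_t old = *ptep;
	*ptep = 0;
	return old;
}

static void set_pte_at(pte_t *ptep, pte_t val)
{
	*ptep = val;
}

static void flush_tlb_range(void)
{
	/* nop on s390 when ptep_get_and_clear already flushed */
}

int main(void)
{
	pte_t table[4] = { 0x1001, 0x2001, 0x3001, 0x4001 };
	unsigned long newprot = 0x5;

	for (int i = 0; i < 4; i++) {
		pte_t ptent = ptep_get_and_clear(&table[i]);	/* 1 */
		ptent = (ptent & ~PROT_MASK) | newprot;
		set_pte_at(&table[i], ptent);			/* 2 */
	}
	flush_tlb_range();					/* 3 */

	for (int i = 0; i < 4; i++)
		printf("pte[%d] = %#lx\n", i, table[i]);
	return 0;
}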