author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2010-08-24 03:26:21 -0400
committer Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-08-24 03:26:34 -0400
commit    050eef364ad700590a605a0749f825cab4834b1e
tree      2714c9cf7edcbf394971cc2c929e5ab2ea34d6a6  /arch/s390/include/asm/pgtable.h
parent    7af048dc7639db5202c56fecf2346c310647a218
[S390] fix tlb flushing vs. concurrent /proc accesses
The tlb flushing code uses the mm_users field of the mm_struct to decide if each page table entry needs to be flushed individually with IPTE, or if a global flush for the mm_struct is sufficient after all page table updates have been done. The comment for mm_users says "How many users with user space?", but the /proc code increases mm_users after it has found the process structure by pid, without creating a new user process. That makes mm_users useless for the decision between the two tlb flushing methods. The current code can be confused into not flushing tlb entries by a concurrent access to /proc files, e.g. while a fork is in progress.

The solution for this problem is to make the tlb flushing logic independent of the mm_users field.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
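The fields the new check relies on, context.attach_count and context.flush_mm, are introduced outside this header and therefore do not appear in the diffstat below. A minimal sketch of how that bookkeeping plausibly works, assuming the counter is maintained at context-switch time (simplified, not the literal kernel source):

/*
 * Hedged sketch of the companion change (mm context fields plus
 * switch_mm() bookkeeping); details and other fields elided.
 */
typedef struct {
	atomic_t attach_count;	/* tasks this mm is actually attached to */
	unsigned int flush_mm;	/* a tlb flush for this mm was deferred */
	/* ... remaining s390 context fields ... */
} mm_context_t;

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* ... load the new address space control element ... */
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
}

Unlike mm_users, attach_count only changes when a task attaches to or detaches from the address space, so a /proc reader taking a reference via get_task_mm() can no longer make a single-threaded mm look shared.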
Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
 arch/s390/include/asm/pgtable.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 89a504c3f12e..3157441ee1da 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 #define ptep_get_and_clear(__mm, __address, __ptep)			\
 ({									\
 	pte_t __pte = *(__ptep);					\
-	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
+	(__mm)->context.flush_mm = 1;					\
+	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
 	    (__mm) != current->active_mm)				\
 		ptep_invalidate(__mm, __address, __ptep);		\
 	else								\
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({									\
 	pte_t __pte = *(__ptep);					\
 	if (pte_write(__pte)) {						\
-		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
+		(__mm)->context.flush_mm = 1;				\
+		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
 		    (__mm) != current->active_mm)			\
 			ptep_invalidate(__mm, __addr, __ptep);		\
 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
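Note the ordering in both hunks: flush_mm is set to 1 before attach_count is tested, so whenever the per-entry IPTE is skipped, a pending-flush mark is left behind for the mm. The batched flush path can then test and clear that mark; roughly (a hedged sketch of the consumer side, locking elided):

static inline void __tlb_flush_mm_cond(struct mm_struct *mm)
{
	/* Flush only if some page table update chose the lazy path. */
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);	/* global flush for this mm */
		mm->context.flush_mm = 0;
	}
}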