author     Martin Schwidefsky <schwidefsky@de.ibm.com>              2010-08-24 03:26:21 -0400
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-08-24 03:26:34 -0400
commit     050eef364ad700590a605a0749f825cab4834b1e
tree       2714c9cf7edcbf394971cc2c929e5ab2ea34d6a6 /arch/s390
parent     7af048dc7639db5202c56fecf2346c310647a218
[S390] fix tlb flushing vs. concurrent /proc accesses
The tlb flushing code uses the mm_users field of the mm_struct to
decide if each page table entry needs to be flushed individually with
IPTE or if a global flush for the mm_struct is sufficient after all
page table updates have been done. The comment for mm_users says "How
many users with user space?", but the /proc code increases mm_users
after it has found the process structure by pid, without creating a
new user process. That makes mm_users useless for choosing between
the two tlb flushing methods: a concurrent access to /proc files can
confuse the current code into not flushing tlb entries at all, e.g.
while a fork is in progress. The solution is to make the tlb flushing
logic independent of the mm_users field.
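
To make the interleaving concrete, here is a minimal user-space model
of the pre-patch logic (hypothetical names, not kernel code; the real
checks also test mm == current->active_mm, which the model omits):

/* tlb_race_model.c - sketch of the pre-patch decision logic.
 * Build with: cc -o tlb_race_model tlb_race_model.c
 */
#include <stdio.h>

struct mm_model {
	int mm_users;	/* bumped by clone() and by /proc via get_task_mm() */
	int flushed;	/* did any TLB flush happen? */
};

/* models the old ptep_get_and_clear(): IPTE only if not "single user" */
static void pte_clear_old(struct mm_model *mm)
{
	if (mm->mm_users > 1)
		mm->flushed = 1;	/* flush this entry with IPTE */
	/* else: defer, relying on the batched flush below */
}

/* models the old __tlb_flush_mm_cond(): batched flush, single user only */
static void flush_cond_old(struct mm_model *mm)
{
	if (mm->mm_users <= 1)
		mm->flushed = 1;
}

int main(void)
{
	struct mm_model mm = { .mm_users = 1, .flushed = 0 };

	pte_clear_old(&mm);	/* looks single-user: IPTE skipped */
	mm.mm_users++;		/* concurrent /proc reader takes a reference */
	flush_cond_old(&mm);	/* now looks multi-user: batch flush skipped */

	printf("flushed: %s\n", mm.flushed ? "yes" : "no - stale TLB");
	return 0;
}

Each branch looks safe on its own, but the /proc reference moves
mm_users between the two checks, so neither path flushes.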
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/hugetlb.h      | 4
-rw-r--r--  arch/s390/include/asm/mmu.h          | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h  | 9
-rw-r--r--  arch/s390/include/asm/pgtable.h      | 6
-rw-r--r--  arch/s390/include/asm/tlb.h          | 3
-rw-r--r--  arch/s390/include/asm/tlbflush.h     | 6
-rw-r--r--  arch/s390/kernel/smp.c               | 2
-rw-r--r--  arch/s390/mm/init.c                  | 2

8 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 670a1d1745d2..bb8343d157bc 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -97,6 +97,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 {
 	pte_t pte = huge_ptep_get(ptep);
 
+	mm->context.flush_mm = 1;
 	pmd_clear((pmd_t *) ptep);
 	return pte;
 }
@@ -167,7 +168,8 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 ({									\
 	pte_t __pte = huge_ptep_get(__ptep);				\
 	if (pte_write(__pte)) {						\
-		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
+		(__mm)->context.flush_mm = 1;				\
+		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
 		    (__mm) != current->active_mm)			\
 			huge_ptep_invalidate(__mm, __addr, __ptep);	\
 		set_huge_pte_at(__mm, __addr, __ptep,			\
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 99e3409102b9..78522cdefdd4 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -2,6 +2,8 @@
 #define __MMU_H
 
 typedef struct {
+	atomic_t attach_count;
+	unsigned int flush_mm;
 	spinlock_t list_lock;
 	struct list_head crst_list;
 	struct list_head pgtable_list;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 976e273988c2..a6f0e7cc9cde 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,11 +11,14 @@
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	atomic_set(&mm->context.attach_count, 0);
+	mm->context.flush_mm = 0;
 	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -76,6 +79,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 	update_mm(next, tsk);
+	atomic_dec(&prev->context.attach_count);
+	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+	atomic_inc(&next->context.attach_count);
+	/* Check for TLBs not flushed yet */
+	if (next->context.flush_mm)
+		__tlb_flush_mm(next);
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
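Note the semantics this hunk establishes: attach_count counts CPUs
that actually have the address space attached (switch_mm increments
it for the incoming mm and decrements it for the outgoing one, and
__cpu_up/__cpu_die below do the same for init_mm), while a /proc
reference never touches it. A CPU attaching an mm with a pending
deferred flush consumes flush_mm here, before user code can run on
stale translations; a combined sketch of the new protocol follows the
tlbflush.h hunk below.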
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 89a504c3f12e..3157441ee1da 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 #define ptep_get_and_clear(__mm, __address, __ptep)			\
 ({									\
 	pte_t __pte = *(__ptep);					\
-	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
+	(__mm)->context.flush_mm = 1;					\
+	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
 	    (__mm) != current->active_mm)				\
 		ptep_invalidate(__mm, __address, __ptep);		\
 	else								\
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({									\
 	pte_t __pte = *(__ptep);					\
 	if (pte_write(__pte)) {						\
-		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
+		(__mm)->context.flush_mm = 1;				\
+		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
 		    (__mm) != current->active_mm)			\
 			ptep_invalidate(__mm, __addr, __ptep);		\
 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 81150b053689..fd1c00d08bf5 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -50,8 +50,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
-		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+	tlb->fullmm = full_mm_flush;
 	tlb->nr_ptes = 0;
 	tlb->nr_pxds = TLB_NR_PTRS;
 	if (tlb->fullmm)
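With the pending-flush flag carrying correctness, tlb_gather_mmu no
longer needs to guess: the old initialization promoted a gather to a
full flush whenever the mm looked single-user or only one CPU was
online, which was exactly the mm_users heuristic this patch removes.
fullmm now reflects only the caller's explicit full_mm_flush request,
and deferred entries are picked up later via flush_mm.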
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 304cffa623e1..29d5d6d4becc 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -94,8 +94,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+	spin_lock(&mm->page_table_lock);
+	if (mm->context.flush_mm) {
 		__tlb_flush_mm(mm);
+		mm->context.flush_mm = 0;
+	}
+	spin_unlock(&mm->page_table_lock);
 }
 
 /*
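For symmetry with the model after the commit message, here is the
post-patch protocol in the same user-space sketch (again hypothetical
names, with the page_table_lock reduced to comments since the model
is single-threaded):

/* tlb_fix_model.c - sketch of the post-patch decision logic.
 * Build with: cc -o tlb_fix_model tlb_fix_model.c
 */
#include <stdio.h>

struct mm_model {
	int attach_count;	/* CPUs with the mm attached, not references */
	int flush_mm;		/* a deferred flush is pending */
	int flushed;		/* did any TLB flush happen? */
};

/* models the new ptep_get_and_clear(): always record the pending flush */
static void pte_clear_new(struct mm_model *mm)
{
	mm->flush_mm = 1;
	if (mm->attach_count > 1)
		mm->flushed = 1;	/* flush this entry with IPTE */
}

/* models the new __tlb_flush_mm_cond(): consume the pending flush */
static void flush_cond_new(struct mm_model *mm)
{
	/* kernel: spin_lock(&mm->page_table_lock); */
	if (mm->flush_mm) {
		mm->flushed = 1;
		mm->flush_mm = 0;
	}
	/* kernel: spin_unlock(&mm->page_table_lock); */
}

int main(void)
{
	struct mm_model mm = { .attach_count = 1, .flush_mm = 0, .flushed = 0 };
	int mm_users = 1;	/* no longer consulted by either branch */

	pte_clear_new(&mm);	/* one CPU attached: IPTE skipped, flag set */
	mm_users++;		/* concurrent /proc reader: irrelevant now */
	flush_cond_new(&mm);	/* pending flag forces the batched flush */

	printf("flushed: %s (mm_users=%d)\n",
	       mm.flushed ? "yes" : "no", mm_users);
	return 0;
}

The flag is set unconditionally on every deferred update and cleared
only after a flush, so the batched path can no longer be skipped,
however many references /proc takes.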
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 541053ed234e..8127ebd59c4d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -583,6 +583,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
 	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
+	atomic_inc(&init_mm.context.attach_count);
 	asm volatile(
 		"	stam	0,15,0(%0)"
 		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -659,6 +660,7 @@ void __cpu_die(unsigned int cpu)
 	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 	smp_free_lowcore(cpu);
+	atomic_dec(&init_mm.context.attach_count);
 	pr_info("Processor %d stopped\n", cpu);
 }
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index acc91c75bc94..30eb6d02ddb8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -74,6 +74,8 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
+	atomic_set(&init_mm.context.attach_count, 1);
+
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));