author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-08-17 02:15:16 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-09-06 03:24:42 -0400
commit		60f07c8ec5fae06c23e9fd7bab67dabce92b3414
tree		3c189bb7d158caba68c36b467603f94b243eea8f
parent		b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659
s390/mm: fix race on mm->context.flush_mm
The order in __tlb_flush_mm_lazy is to flush the TLB first and then
clear the mm->context.flush_mm bit. This can lead to missed flushes, as
the bit can be set at any time; the order needs to be the other way
around.

But this leads to a different race: __tlb_flush_mm_lazy may be called
on two CPUs concurrently. If mm->context.flush_mm is cleared first,
another CPU can bypass __tlb_flush_mm_lazy although the first CPU has
not done the flush yet. In a virtualized environment the time until the
flush finally completes can be arbitrarily long.

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function in
finish_arch_post_lock_switch as well.
Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
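To make the new serialization concrete, here is a minimal userspace model
of the patched logic. This is not kernel code: flush_mm, ctx_lock and
fake_tlb_flush() are illustrative stand-ins for mm->context.flush_mm,
mm->context.lock and __tlb_flush_mm(), and threads stand in for CPUs.
Whichever thread takes the lock first clears the pending bit and performs
the flush; the other blocks on the lock until that flush has completed
instead of bypassing it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int flush_mm = 1;   /* models mm->context.flush_mm: a flush is pending */
static atomic_int flushes_done;   /* how many flushes were actually performed */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER; /* models mm->context.lock */

/* Stand-in for __tlb_flush_mm(); the sleep models the arbitrarily long
 * flush latency the commit message mentions for virtualized setups. */
static void fake_tlb_flush(void)
{
	usleep(10 * 1000);
	atomic_fetch_add(&flushes_done, 1);
}

/* The patched ordering: test, clear and flush form one critical section. */
static void tlb_flush_mm_lazy(void)
{
	pthread_mutex_lock(&ctx_lock);
	if (atomic_load(&flush_mm)) {
		atomic_store(&flush_mm, 0);
		fake_tlb_flush();
	}
	pthread_mutex_unlock(&ctx_lock);
}

static void *cpu(void *unused)
{
	(void)unused;
	tlb_flush_mm_lazy();
	/* After returning, this "CPU" may rely on the flush having happened.
	 * Without ctx_lock, clearing flush_mm before flushing would let one
	 * thread return here while the other is still mid-flush. */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu, NULL);
	pthread_create(&b, NULL, cpu, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("pending=%d, flushes=%d\n",
	       atomic_load(&flush_mm), atomic_load(&flushes_done));
	return 0;
}

Compiled with cc -pthread, this prints pending=0, flushes=1: the pending
flush is serviced exactly once, and neither caller can return before it
is done.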
 arch/s390/include/asm/mmu.h         | 2 ++
 arch/s390/include/asm/mmu_context.h | 4 ++--
 arch/s390/include/asm/tlbflush.h    | 4 +++-
 3 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bd6f30304518..3525fe6e7e4c 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 
 typedef struct {
+	spinlock_t lock;
 	cpumask_t cpu_attach_mask;
 	atomic_t flush_count;
 	unsigned int flush_mm;
@@ -27,6 +28,7 @@ typedef struct {
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						   \
+	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	   \
 	.context.pgtable_lock =						   \
 		__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),	   \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8823e35f69a9..484efe8f4234 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -17,6 +17,7 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.lock);
 	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
 	spin_lock_init(&mm->context.gmap_lock);
@@ -121,8 +122,7 @@ static inline void finish_arch_post_lock_switch(void)
 		while (atomic_read(&mm->context.flush_count))
 			cpu_relax();
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		if (mm->context.flush_mm)
-			__tlb_flush_mm(mm);
+		__tlb_flush_mm_lazy(mm);
 		preempt_enable();
 	}
 	set_fs(current->thread.mm_segment);
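Reconstructed from the hunk above (surrounding lines elided), the patched
tail of finish_arch_post_lock_switch() reads as follows; a task being
switched in now either performs a pending flush itself or waits inside
__tlb_flush_mm_lazy() for the CPU that is already doing it:

	while (atomic_read(&mm->context.flush_count))
		cpu_relax();
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	__tlb_flush_mm_lazy(mm); /* was: if (mm->context.flush_mm) __tlb_flush_mm(mm); */
	preempt_enable();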
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16fe2a3d9a03..b08d5bc2666e 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -101,10 +101,12 @@ static inline void __tlb_flush_kernel(void)
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
+	spin_lock(&mm->context.lock);
 	if (mm->context.flush_mm) {
-		__tlb_flush_mm(mm);
 		mm->context.flush_mm = 0;
+		__tlb_flush_mm(mm);
 	}
+	spin_unlock(&mm->context.lock);
 }
 
 /*
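For reference, __tlb_flush_mm_lazy() as it reads with the hunk applied:
the flush_mm bit is now cleared before the flush, and both steps sit
inside the new critical section.

static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

Clearing the bit first means a flush request that races in afterwards
simply sets the bit again and is picked up by a later call, while the
lock guarantees that no caller returns before a flush it observed as
pending has completed.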