 arch/x86/include/asm/mmu_context.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1edc9cd198b8..bfd9b2a35a0b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -132,14 +132,16 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * be sent, and CPU 0's TLB will contain a stale entry.)
 		 *
 		 * The bad outcome can occur if either CPU's load is
-		 * reordered before that CPU's store, so both CPUs much
+		 * reordered before that CPU's store, so both CPUs must
 		 * execute full barriers to prevent this from happening.
 		 *
 		 * Thus, switch_mm needs a full barrier between the
 		 * store to mm_cpumask and any operation that could load
-		 * from next->pgd. This barrier synchronizes with
-		 * remote TLB flushers. Fortunately, load_cr3 is
-		 * serializing and thus acts as a full barrier.
+		 * from next->pgd. TLB fills are special and can happen
+		 * due to instruction fetches or for no reason at all,
+		 * and neither LOCK nor MFENCE orders them.
+		 * Fortunately, load_cr3() is serializing and gives the
+		 * ordering guarantee we need.
 		 *
 		 */
 		load_cr3(next->pgd);
@@ -188,9 +190,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 *
-			 * As above, this is a barrier that forces
-			 * TLB repopulation to be ordered after the
-			 * store to mm_cpumask.
+			 * As above, load_cr3() is serializing and orders TLB
+			 * fills with respect to the mm_cpumask write.
 			 */
 			load_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
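
The two comment hunks above describe the same store-buffering race from both sides: the switching CPU must not load from next->pgd (a TLB fill) before its mm_cpumask store is visible, and the remote flusher must not read mm_cpumask before its page-table update is visible. Below is a minimal userspace C sketch of that pattern, not kernel code: the names switcher(), flusher(), cpumask_bit and pgd_entry are made-up stand-ins, and the seq_cst fences merely model the ordering that load_cr3() and the flush path's barriers provide on real hardware; as the patched comment notes, an ordinary LOCK or MFENCE would not be enough in the kernel because hardware TLB fills are not ordered by them.

/*
 * Userspace sketch of the switch_mm()/remote-flush ordering requirement.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit;   /* stand-in for this CPU's mm_cpumask bit */
static atomic_int pgd_entry = 1; /* stand-in for a page-table entry: 1 = present, 0 = freed */

/* switch_mm() side: publish membership, then walk the page tables. */
static void *switcher(void *arg)
{
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);

	/* Stands in for load_cr3(): a full barrier between the
	 * mm_cpumask store and any load from next->pgd (TLB fill). */
	atomic_thread_fence(memory_order_seq_cst);

	printf("switcher saw pgd_entry=%d\n",
	       atomic_load_explicit(&pgd_entry, memory_order_relaxed));
	return NULL;
}

/* Remote flush side: zap the mapping, then decide who gets an IPI. */
static void *flusher(void *arg)
{
	atomic_store_explicit(&pgd_entry, 0, memory_order_relaxed);

	/* The flusher also needs a full barrier, or it could read
	 * mm_cpumask before its page-table update is visible. */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(&cpumask_bit, memory_order_relaxed))
		printf("flusher sends the IPI\n");
	else
		printf("flusher skips the IPI\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, switcher, NULL);
	pthread_create(&b, NULL, flusher, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * With both fences in place, at least one of these holds on every
	 * run: the switcher sees pgd_entry == 0, or the flusher sees
	 * cpumask_bit == 1 and sends the IPI.  Drop either fence and both
	 * can fail together, which is the stale-TLB outcome the kernel
	 * comment warns about.
	 */
	return 0;
}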
