author     David Mosberger-Tang <David.Mosberger@acm.org>  2005-07-26 01:23:00 -0400
committer  Tony Luck <tony.luck@intel.com>                 2005-08-12 18:05:21 -0400
commit     badea125d7cbd93f1678a95cf009b3bdfe6065cd (patch)
tree       c9cd47cfc5f7474fdf60735548734e647a4f7a9d /include/asm-ia64
parent     7d69fa6266770eeb6317eddd46b64456e8a515bf (diff)
[IA64] Fix race in mm-context wrap-around logic.
The patch below fixes a race which could cause stale TLB entries. Specifically, when two CPUs raced for entrance to wrap_mmu_context(), the losing CPU would find that, by the time it acquired ctx.lock, mm->context already held a valid value; it then failed to (re-)check the delayed-TLB-flush logic and could therefore start using a context number while stale entries were still in its TLB. The fix is to check for delayed TLB flushes only after mm->context is valid (non-zero).

The patch also makes GCC v4.x happier by defining a non-volatile variant of mm_context_t, called nv_mm_context_t.

Signed-off-by: David Mosberger-Tang <David.Mosberger@acm.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
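For illustration, the following is a minimal user-space sketch (not kernel code) of the double-checked pattern the fix relies on. All identifiers here -- ctx_lock, need_flush, flush_tlb(), get_context() -- are placeholders invented for the example; the point is the ordering: a context is validated (allocated under the lock if needed) before the delayed flush is honoured, and the flush flag itself is re-checked under the same lock that the wrap path holds.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ctx = 1, ctx_limit = 64;
static volatile unsigned long mm_context;   /* 0 means "no context assigned yet" */
static int need_flush;                      /* stands in for the per-CPU ia64_need_tlb_flush flag */

static void flush_tlb(void) { puts("flushing stale TLB entries"); }

/* Honour a flush requested by a concurrent wrap; re-check the flag under the lock. */
static void delayed_tlb_flush(void)
{
	if (need_flush) {
		pthread_mutex_lock(&ctx_lock);
		if (need_flush) {               /* may already have been handled */
			flush_tlb();
			need_flush = 0;
		}
		pthread_mutex_unlock(&ctx_lock);
	}
}

static unsigned long get_context(void)
{
	unsigned long context = mm_context;     /* snapshot of the volatile value */

	if (context == 0) {
		pthread_mutex_lock(&ctx_lock);
		context = mm_context;           /* re-check: a racing thread may have won */
		if (context == 0) {
			if (next_ctx >= ctx_limit)
				need_flush = 1;     /* stands in for wrap_mmu_context() */
			mm_context = context = next_ctx++;
		}
		pthread_mutex_unlock(&ctx_lock);
	}
	/* The fix: flush only once the context is known to be valid. */
	delayed_tlb_flush();
	return context;
}

int main(void)
{
	printf("context = %lu\n", get_context());
	return 0;
}

Built with e.g. "cc -pthread example.c", no wrap occurs with the defaults above, so the only output is the assigned context number.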
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/mmu.h          |  8
-rw-r--r--  include/asm-ia64/mmu_context.h  | 54
2 files changed, 37 insertions(+), 25 deletions(-)
diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h
index ae1525352a25..611432ba579c 100644
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
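Aside: the commit message only says that the non-volatile typedef "makes GCC v4.x happier". A plausible reading (an assumption, not stated in the patch) is that a volatile-qualified function return type draws a "type qualifiers ignored on function return type" style diagnostic, which returning the plain nv_mm_context_t avoids while the shared field itself stays volatile. A tiny illustrative sketch, with shared_context and read_context() invented for the example:

typedef volatile unsigned long mm_context_t;	/* shared context number */
typedef unsigned long nv_mm_context_t;		/* locals and return values */

static mm_context_t shared_context;

static nv_mm_context_t
read_context (void)
{
	nv_mm_context_t ctx = shared_context;	/* one volatile read */
	return ctx;				/* returned as a plain value */
}

int main (void)
{
	shared_context = 42;
	return read_context() == 42 ? 0 : 1;
}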
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb04..0680d163be97 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
-
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
-		}
+	nv_mm_context_t context = mm->context;
+
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.