Diffstat (limited to 'include/asm-ia64/mmu_context.h')
-rw-r--r--	include/asm-ia64/mmu_context.h	61
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb04..8d6e72f7b08e 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -19,6 +19,7 @@
 
 #define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
 
+# include <asm/page.h>
 # ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
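
The new include sits before the `# ifndef __ASSEMBLY__` guard, so the
definitions from asm/page.h (notably the RGN_* region macros used further
down) are visible to assembly includers of this header as well. The hunk's
context also shows ia64_rid(): bits 63..61 of an ia64 virtual address select
one of eight regions, so shifting the context left by 3 and ORing in
(addr >> 61) gives every mm eight consecutive region IDs, one per region.
A minimal user-space sketch of that arithmetic (all values invented for
illustration):

	/* standalone model of the ia64_rid() macro shown above */
	#include <stdio.h>

	#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))

	int main (void)
	{
		unsigned long ctx = 5;				/* made-up context number */
		unsigned long uaddr = 0x2000000000000000UL;	/* region 1 address */
		unsigned long haddr = 0x8000000000000000UL;	/* region 4 (RGN_HPAGE) */

		printf("rid for region 1: 0x%lx\n", ia64_rid(ctx, uaddr));	/* 5<<3|1 = 0x29 */
		printf("rid for region 4: 0x%lx\n", ia64_rid(ctx, haddr));	/* 5<<3|4 = 0x2c */
		return 0;
	}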
@@ -55,34 +56,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
+	nv_mm_context_t context = mm->context;
 
-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
 
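
Two changes meet in this hunk. delayed_tlb_flush() now re-tests the per-CPU
flag under ia64_ctx.lock, so the flush cannot race with a concurrent context
allocation, and get_mmu_context() is restructured into the classic
check/lock/re-check shape: read mm->context without the lock, and only on
the slow path take the lock and test again before allocating a fresh
context. The delayed flush also moves here from activate_mm() (see the last
hunk), so a new context is never used while stale TLB entries for it may
survive. A user-space analogue of the check/lock/re-check pattern, with a
pthread mutex standing in for the spinlock (a strictly conforming program
would read the unlocked field with C11 atomics; this sketch mirrors the
kernel code instead, and every name in it is invented):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long ctx_next = 1;		/* 0 means "no context yet" */

	struct obj { unsigned long context; };

	static unsigned long get_context (struct obj *o)
	{
		unsigned long context = o->context;

		if (context == 0) {			/* slow path only when unassigned */
			pthread_mutex_lock(&ctx_lock);
			/* re-check, now that we've got the lock: */
			context = o->context;
			if (context == 0)
				o->context = context = ctx_next++;
			pthread_mutex_unlock(&ctx_lock);
		}
		return context;
	}

	int main (void)
	{
		struct obj o = { 0 };
		printf("ctx = %lu\n", get_context(&o));	/* slow path: allocates 1 */
		printf("ctx = %lu\n", get_context(&o));	/* fast path: no lock taken */
		return 0;
	}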
@@ -104,13 +117,13 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
 
-	old_rr4 = ia64_get_rr(0x8000000000000000UL);
+	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
 	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
 
@@ -122,6 +135,10 @@ reload_context (mm_context_t context)
 	rr4 = rr0 + 4*rid_incr;
 #ifdef CONFIG_HUGETLB_PAGE
 	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+#  if RGN_HPAGE != 4
+#    error "reload_context assumes RGN_HPAGE is 4"
+#  endif
 #endif
 
 	ia64_set_rr(0x0000000000000000UL, rr0);
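
For orientation: reload_context() expands one context number into five
region register values. rid = context << 3 leaves room for the region
number, and rid_incr = 1 << 8 steps the RID field, which starts at bit 8 of
a region register, so rr0 through rr4 carry consecutive RIDs. Under
CONFIG_HUGETLB_PAGE, bits 2..7 of rr4 (mask 0xfc, the preferred-page-size
field) are kept from the old value so the huge-page region retains its page
size, and the new #error guard pins down the assumption that RGN_HPAGE is
region 4. A small sketch of that arithmetic (the real register value also
carries page-size and VHPT bits in its low byte, which these hunks do not
touch and this sketch omits):

	#include <stdio.h>

	int main (void)
	{
		unsigned long context = 5;		/* made-up context number */
		unsigned long rid = context << 3;	/* space for the region number */
		unsigned long rid_incr = 1 << 8;	/* RID field starts at bit 8 */
		unsigned long old_rr4 = 0x68UL;		/* pretend old page-size bits */
		unsigned long rr0, rr4;

		rr0 = rid << 8;				/* RID into bits 8 and up */
		rr4 = rr0 + 4*rid_incr;			/* region 4 gets rid + 4 */

		/* hugetlb case: preserve bits 2..7 (page-size field) of old rr4 */
		rr4 = (rr4 & ~0xfcUL) | (old_rr4 & 0xfc);

		printf("rr0 = 0x%lx\n", rr0);		/* 0x2800 */
		printf("rr4 = 0x%lx\n", rr4);		/* 0x2c68: rid+4, page size kept */
		return 0;
	}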
@@ -138,7 +155,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
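
Only the declaration and the top of the do-loop appear in this hunk, but the
shape matters: activate_context() installs a context and then retries if
mm->context changed underneath it, as happens when another CPU calls
wrap_mmu_context() and the mm is handed a new context. A sketch of that
retry idiom follows; the loop body is not shown in this diff, and install(),
get_ctx() and struct mm here are invented stand-ins for reload_context(),
get_mmu_context() and the kernel's struct mm_struct:

	#include <stdio.h>

	struct mm { unsigned long context; };

	static unsigned long current_rid;

	static unsigned long get_ctx (struct mm *mm)
	{
		/* allocate on first use, as get_mmu_context() does */
		return mm->context ? mm->context : (mm->context = 1);
	}

	static void install (unsigned long context)
	{
		current_rid = context << 3;	/* stands in for reload_context() */
	}

	static void activate (struct mm *mm)
	{
		unsigned long context;

		do {
			context = get_ctx(mm);
			install(context);
			/*
			 * If the context space wrapped and mm got a new context
			 * while we were installing the old one, do it again.
			 */
		} while (context != mm->context);
	}

	int main (void)
	{
		struct mm mm = { 0 };
		activate(&mm);
		printf("installed rid base 0x%lx\n", current_rid);
		return 0;
	}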
@@ -157,8 +174,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.