author	Tony Luck <tony.luck@intel.com>	2005-11-10 13:39:49 -0500
committer	Tony Luck <tony.luck@intel.com>	2005-11-10 13:39:49 -0500
commit	7669a22592fc6cc7ac03f55a7db8d23ce938f1dc (patch)
tree	4e83ba401350c3c35689fc84f1c3af81c07b3f88 /include
parent	cb8a55e4cda017ed37a3ee051365f33a86956312 (diff)
parent	58cd90829918dabbd81a453de676d41fb7b628ad (diff)
Pull context-bitmap into release branch
Diffstat (limited to 'include')
-rw-r--r--	include/asm-ia64/mmu_context.h	81
-rw-r--r--	include/asm-ia64/tlbflush.h	1
2 files changed, 48 insertions, 34 deletions
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d6e72f7b08e..b5c65081a3aa 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -7,12 +7,13 @@
  */
 
 /*
- * Routines to manage the allocation of task context numbers.  Task context numbers are
- * used to reduce or eliminate the need to perform TLB flushes due to context switches.
- * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
- * consider the region number when performing a TLB lookup, we need to assign a unique
- * region id to each region in a process.  We use the least significant three bits in a
- * region id for this purpose.
+ * Routines to manage the allocation of task context numbers.  Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches.  Context numbers are implemented using ia-64
+ * region ids.  Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process.  We use the least significant three bits in a
+ * region id for this purpose.
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -32,13 +33,17 @@
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
-	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
+	unsigned int limit;	/* available free range */
 	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+				/* call wrap_mmu_context when next >= max */
+	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
+	unsigned long *flushmap;/* pending rid to be flushed */
 };
 
 extern struct ia64_ctx ia64_ctx;
 DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 
+extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
 static inline void
@@ -47,10 +52,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 }
 
 /*
- * When the context counter wraps around all TLBs need to be flushed because an old
- * context number might have been reused. This is signalled by the ia64_need_tlb_flush
- * per-CPU variable, which is checked in the routine below. Called by activate_mm().
- * <efocht@ess.nec.de>
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
  */
 static inline void
 delayed_tlb_flush (void)
@@ -60,11 +65,9 @@ delayed_tlb_flush (void)
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
 		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
-				local_flush_tlb_all();
-				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
-			}
+		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+			local_flush_tlb_all();
+			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
 		}
 		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
@@ -76,20 +79,27 @@ get_mmu_context (struct mm_struct *mm)
 	unsigned long flags;
 	nv_mm_context_t context = mm->context;
 
-	if (unlikely(!context)) {
-		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			/* re-check, now that we've got the lock: */
-			context = mm->context;
-			if (context == 0) {
-				cpus_clear(mm->cpu_vm_mask);
-				if (ia64_ctx.next >= ia64_ctx.limit)
-					wrap_mmu_context(mm);
-				mm->context = context = ia64_ctx.next++;
-			}
-		}
-		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	if (likely(context))
+		goto out;
+
+	spin_lock_irqsave(&ia64_ctx.lock, flags);
+	/* re-check, now that we've got the lock: */
+	context = mm->context;
+	if (context == 0) {
+		cpus_clear(mm->cpu_vm_mask);
+		if (ia64_ctx.next >= ia64_ctx.limit) {
+			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			if (ia64_ctx.next >= ia64_ctx.max_ctx)
+				wrap_mmu_context(mm);
+		}
+		mm->context = context = ia64_ctx.next++;
+		__set_bit(context, ia64_ctx.bitmap);
 	}
+	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
 	/*
 	 * Ensure we're not starting to use "context" before any old
 	 * uses of it are gone from our TLB.
@@ -100,8 +110,8 @@ get_mmu_context (struct mm_struct *mm)
 }
 
 /*
- * Initialize context number to some sane value. MM is guaranteed to be a brand-new
- * address-space, so no TLB flushing is needed, ever.
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
  */
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
@@ -162,7 +172,10 @@ activate_context (struct mm_struct *mm)
 		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 		reload_context(context);
-		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
+		/*
+		 * in the unlikely event of a TLB-flush by another thread,
+		 * redo the load.
+		 */
 	} while (unlikely(context != mm->context));
 }
 
@@ -175,8 +188,8 @@ static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
 	/*
-	 * We may get interrupts here, but that's OK because interrupt handlers cannot
-	 * touch user-space.
+	 * We may get interrupts here, but that's OK because interrupt
+	 * handlers cannot touch user-space.
 	 */
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
 	activate_context(next);
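
For reference, below is a minimal user-space sketch of the free-range allocation that the new get_mmu_context() performs over ia64_ctx.bitmap. The test_bit/set_bit/find_next_zero_bit/find_next_bit helpers are simplified stand-ins for the kernel primitives of the same name, and NCTX is a hypothetical small max_ctx chosen only for illustration; this is not the patch's code.

/*
 * Illustrative user-space sketch: keep a cursor "next" and the end of the
 * current free run "limit"; when the run is used up, search the bitmap for
 * the next free run. Helper names mimic the kernel API but are simplified.
 */
#include <stdio.h>

#define NCTX		64			/* hypothetical max context value */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long bitmap[(NCTX + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int test_bit(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static unsigned int find_next_zero_bit(const unsigned long *map,
				       unsigned int size, unsigned int start)
{
	while (start < size && test_bit(start, map))
		start++;
	return start;
}

static unsigned int find_next_bit(const unsigned long *map,
				  unsigned int size, unsigned int start)
{
	while (start < size && !test_bit(start, map))
		start++;
	return start;
}

int main(void)
{
	unsigned int next = 1, limit = 1;	/* context 0 is reserved */

	set_bit(0, bitmap);			/* IA64_REGION_ID_KERNEL stays allocated */

	for (int i = 0; i < 5; i++) {
		if (next >= limit) {
			/* skip contexts that are still in use ... */
			next = find_next_zero_bit(bitmap, NCTX, next);
			/* ... and remember where this free run ends */
			limit = find_next_bit(bitmap, NCTX, next);
			if (next >= NCTX) {
				/* here the kernel would call wrap_mmu_context() */
				printf("bitmap exhausted, would wrap\n");
				return 0;
			}
		}
		set_bit(next, bitmap);		/* mark the rid as taken */
		printf("allocated context %u (free run ends at %u)\n", next, limit);
		next++;
	}
	return 0;
}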
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index b65c62702724..a35b323bae4c 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -51,6 +51,7 @@ flush_tlb_mm (struct mm_struct *mm)
 	if (!mm)
 		return;
 
+	set_bit(mm->context, ia64_ctx.flushmap);
 	mm->context = 0;
 
 	if (atomic_read(&mm->mm_users) == 0)
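
A companion sketch for the flushmap that flush_tlb_mm() now populates: freed contexts are only recorded in ia64_ctx.flushmap, on the assumption that a later wrap step folds them back into the allocation bitmap before all TLBs are flushed. wrap_mmu_context() itself lives in arch/ia64/mm/tlb.c and is outside this include-only diff, so recycle_flushed_contexts() below is a hypothetical illustration (building on the sketch above), not the kernel's code.

/*
 * Hypothetical wrap-time step: return every rid marked in flushmap to the
 * free pool, then clear flushmap. In the kernel the equivalent work runs
 * under ia64_ctx.lock and is followed by flushing all TLBs and recomputing
 * next/limit.
 */
static void recycle_flushed_contexts(unsigned long *bitmap,
				     unsigned long *flushmap,
				     unsigned int nwords)
{
	for (unsigned int i = 0; i < nwords; i++) {
		bitmap[i] &= ~flushmap[i];	/* free every rid pending a flush */
		flushmap[i] = 0;		/* start collecting for the next wrap */
	}
}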