aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-ia64/mmu_context.h
diff options
context:
space:
mode:
authorPeter Keilty <peter.keilty@hp.com>2005-10-31 16:44:47 -0500
committerTony Luck <tony.luck@intel.com>2005-10-31 17:36:05 -0500
commitdcc17d1baef3721d1574e5b2f4f2d4607514bcff (patch)
tree78b19a9b54f57aa010f50201e7639786b0e5f770 /include/asm-ia64/mmu_context.h
parentf2c84c0e84bfa637a7161eac10157cf3b05b4a73 (diff)
[IA64] Use bitmaps for efficient context allocation/free
Corrects the very inefficient method of finding free context_ids in get_mmu_context(). Instead of walking the task_list of all processes, 2 bitmaps are used to efficiently store and look up state, inuse and needs flushing. The entire rid address space is now used before calling wrap_mmu_context and global tlb flushing. Special thanks to Ken and Rohit for their review and modifications in using a bit flushmap. Signed-off-by: Peter Keilty <peter.keilty@hp.com> Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64/mmu_context.h')
-rw-r--r--include/asm-ia64/mmu_context.h19
1 files changed, 15 insertions, 4 deletions
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d6e72f7b08e..8d9b30b5f7d4 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -32,13 +32,17 @@
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
-	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
+	unsigned int limit;	/* available free range */
 	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+					/* call wrap_mmu_context when next >= max */
+	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
+	unsigned long *flushmap;/* pending rid to be flushed */
 };
 
 extern struct ia64_ctx ia64_ctx;
 DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 
+extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
 static inline void
@@ -83,9 +87,16 @@ get_mmu_context (struct mm_struct *mm)
 		context = mm->context;
 		if (context == 0) {
 			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
+			if (ia64_ctx.next >= ia64_ctx.limit) {
+				ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+						ia64_ctx.max_ctx, ia64_ctx.next);
+				ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+						ia64_ctx.max_ctx, ia64_ctx.next);
+				if (ia64_ctx.next >= ia64_ctx.max_ctx)
+					wrap_mmu_context(mm);
+			}
 			mm->context = context = ia64_ctx.next++;
+			__set_bit(context, ia64_ctx.bitmap);
 		}
 	}
 	spin_unlock_irqrestore(&ia64_ctx.lock, flags);