author		Chen, Kenneth W <kenneth.w.chen@intel.com>	2005-10-29 21:47:04 -0400
committer	Tony Luck <tony.luck@intel.com>	2005-11-03 17:43:50 -0500
commit		58cd90829918dabbd81a453de676d41fb7b628ad (patch)
tree		7806f84a89f767f3a9e78d0e198054f24fae0aee /include/asm-ia64
parent		dcc17d1baef3721d1574e5b2f4f2d4607514bcff (diff)
[IA64] make mmu_context.h and tlb.c 80-column friendly
wrap_mmu_context(), delayed_tlb_flush(), and get_mmu_context() all
have an extra { } block, which causes one extra level of indentation.
get_mmu_context() is particularly bad, with five levels of indentation
to the innermost "if". It finally gets on my nerves that I can't
keep the code within 80 columns. Remove the extra { } blocks and,
while at it, reformat all the comments to be 80-column friendly.
No functional change at all with this patch.
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
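
The change is purely structural. As a rough standalone sketch of the pattern
being removed (not code from the patch; lock, flag, and do_work are invented
names, with a pthread mutex standing in for the kernel spinlock):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int flag;

static void do_work(void) { flag = 0; }

/* Old style: a redundant { } block inside the locked region costs one
 * indentation level for every line of the critical section. */
static void old_style(void)
{
        pthread_mutex_lock(&lock);
        {
                if (flag)
                        do_work();
        }
        pthread_mutex_unlock(&lock);
}

/* New style: identical behavior, one indentation level shallower. */
static void new_style(void)
{
        pthread_mutex_lock(&lock);
        if (flag)
                do_work();
        pthread_mutex_unlock(&lock);
}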
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--	include/asm-ia64/mmu_context.h	80
1 files changed, 41 insertions, 39 deletions
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d9b30b5f7d4..b5c65081a3aa 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -7,12 +7,13 @@
  */
 
 /*
- * Routines to manage the allocation of task context numbers. Task context numbers are
- * used to reduce or eliminate the need to perform TLB flushes due to context switches.
- * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not
- * consider the region number when performing a TLB lookup, we need to assign a unique
- * region id to each region in a process. We use the least significant three bits in a
- * region id for this purpose.
+ * Routines to manage the allocation of task context numbers. Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches. Context numbers are implemented using ia-64
+ * region ids. Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process. We use the least significant three bits in a region
+ * id for this purpose.
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -51,10 +52,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 }
 
 /*
- * When the context counter wraps around all TLBs need to be flushed because an old
- * context number might have been reused. This is signalled by the ia64_need_tlb_flush
- * per-CPU variable, which is checked in the routine below. Called by activate_mm().
- * <efocht@ess.nec.de>
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
  */
 static inline void
 delayed_tlb_flush (void)
@@ -64,11 +65,9 @@ delayed_tlb_flush (void)
 
         if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
                 spin_lock_irqsave(&ia64_ctx.lock, flags);
-                {
-                        if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
-                                local_flush_tlb_all();
-                                __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
-                        }
+                if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+                        local_flush_tlb_all();
+                        __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
                 }
                 spin_unlock_irqrestore(&ia64_ctx.lock, flags);
         }
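
The hunk above only drops the brace block; the logic it preserves is a
double-checked test: read the flag locklessly, and only if it is set take the
lock and re-check before flushing. A rough userspace analogue of that shape
(pthread mutex and atomic flag standing in for ia64_ctx.lock and the per-CPU
variable; illustrative only, not code from the patch):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int need_flush;   /* stands in for ia64_need_tlb_flush */

static void delayed_flush(void)
{
        if (!atomic_load(&need_flush))  /* cheap lockless fast path */
                return;

        pthread_mutex_lock(&ctx_lock);
        if (atomic_load(&need_flush)) { /* re-check under the lock */
                /* local_flush_tlb_all() would run here in the kernel */
                atomic_store(&need_flush, 0);
        }
        pthread_mutex_unlock(&ctx_lock);
}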
@@ -80,27 +79,27 @@ get_mmu_context (struct mm_struct *mm)
         unsigned long flags;
         nv_mm_context_t context = mm->context;
 
-        if (unlikely(!context)) {
-                spin_lock_irqsave(&ia64_ctx.lock, flags);
-                {
-                        /* re-check, now that we've got the lock: */
-                        context = mm->context;
-                        if (context == 0) {
-                                cpus_clear(mm->cpu_vm_mask);
-                                if (ia64_ctx.next >= ia64_ctx.limit) {
-                                        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
-                                                        ia64_ctx.max_ctx, ia64_ctx.next);
-                                        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
-                                                        ia64_ctx.max_ctx, ia64_ctx.next);
-                                        if (ia64_ctx.next >= ia64_ctx.max_ctx)
-                                                wrap_mmu_context(mm);
-                                }
-                                mm->context = context = ia64_ctx.next++;
-                                __set_bit(context, ia64_ctx.bitmap);
-                        }
-                }
-                spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+        if (likely(context))
+                goto out;
+
+        spin_lock_irqsave(&ia64_ctx.lock, flags);
+        /* re-check, now that we've got the lock: */
+        context = mm->context;
+        if (context == 0) {
+                cpus_clear(mm->cpu_vm_mask);
+                if (ia64_ctx.next >= ia64_ctx.limit) {
+                        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+                                        ia64_ctx.max_ctx, ia64_ctx.next);
+                        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+                                        ia64_ctx.max_ctx, ia64_ctx.next);
+                        if (ia64_ctx.next >= ia64_ctx.max_ctx)
+                                wrap_mmu_context(mm);
+                }
+                mm->context = context = ia64_ctx.next++;
+                __set_bit(context, ia64_ctx.bitmap);
         }
+        spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
         /*
          * Ensure we're not starting to use "context" before any old
          * uses of it are gone from our TLB.
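
The restructuring above replaces the nested blocks with an early exit: the
common case (mm already has a context) falls through the likely() test to
the out label, and only the slow path takes the lock. Stripped of the bitmap
search and wraparound handling, the control flow looks roughly like this
(get_context, ctxp, and next_ctx are invented names for the sketch):

#include <pthread.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ctx = 1;      /* stands in for ia64_ctx.next */

static unsigned long get_context(unsigned long *ctxp)
{
        unsigned long context = *ctxp;

        if (context)            /* likely(): fast path, lock never taken */
                goto out;

        pthread_mutex_lock(&ctx_lock);
        context = *ctxp;        /* re-check now that we hold the lock */
        if (context == 0)
                *ctxp = context = next_ctx++;
        pthread_mutex_unlock(&ctx_lock);
out:
        return context;
}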
@@ -111,8 +110,8 @@ get_mmu_context (struct mm_struct *mm)
 }
 
 /*
- * Initialize context number to some sane value. MM is guaranteed to be a brand-new
- * address-space, so no TLB flushing is needed, ever.
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
  */
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
@@ -173,7 +172,10 @@ activate_context (struct mm_struct *mm)
                 if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
                         cpu_set(smp_processor_id(), mm->cpu_vm_mask);
                 reload_context(context);
-                /* in the unlikely event of a TLB-flush by another thread, redo the load: */
+                /*
+                 * in the unlikely event of a TLB-flush by another thread,
+                 * redo the load.
+                 */
         } while (unlikely(context != mm->context));
 }
 
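The comment being rewrapped here documents a retry loop: activate_context()
loads the context into the hardware, then re-reads mm->context and redoes
the load if a concurrent wraparound flush changed it in the meantime.
Continuing the sketch above (hw_context is an invented stand-in for the
region registers that reload_context() programs):

static unsigned long hw_context;        /* stand-in for region registers */

static void activate(unsigned long *ctxp)
{
        unsigned long context;

        do {
                context = get_context(ctxp);
                hw_context = context;   /* reload_context() in the kernel */
        } while (context != *ctxp);     /* redo if flushed concurrently */
}
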
@@ -186,8 +188,8 @@ static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
         /*
-         * We may get interrupts here, but that's OK because interrupt handlers cannot
-         * touch user-space.
+         * We may get interrupts here, but that's OK because interrupt
+         * handlers cannot touch user-space.
          */
         ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
         activate_context(next);