Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h |  8
-rw-r--r--  arch/sparc/include/asm/system_32.h      |  2
-rw-r--r--  arch/sparc/kernel/smp_32.c              | 17
-rw-r--r--  arch/sparc/kernel/smp_64.c              | 10
4 files changed, 19 insertions, 18 deletions
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 5693ab482606..666a73fef28d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -121,8 +121,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * local TLB.
 	 */
 	cpu = smp_processor_id();
-	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-		cpu_set(cpu, mm->cpu_vm_mask);
+	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
 			       SECONDARY_CONTEXT);
 	}
@@ -141,8 +141,8 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
-	if (!cpu_isset(cpu, mm->cpu_vm_mask))
-		cpu_set(cpu, mm->cpu_vm_mask);
+	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
+		cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 79c1ae2b42a3..751c8c17f5a0 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -126,7 +126,7 @@ extern void flushw_all(void);
 #define switch_to(prev, next, last) do {					\
 	SWITCH_ENTER(prev);							\
 	SWITCH_DO_LAZY_FPU(next);						\
-	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask);		\
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm));	\
 	__asm__ __volatile__(							\
 	"sethi	%%hi(here - 0x8), %%o7\n\t"					\
 	"mov	%%g6, %%g3\n\t"							\
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index be1ae37e7733..132d81fb2616 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -143,7 +143,7 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
@@ -154,12 +154,13 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask)) {
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
 			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+				cpumask_copy(mm_cpumask(mm),
+					     cpumask_of(smp_processor_id()));
 		}
 		local_flush_tlb_mm(mm);
 	}
@@ -171,7 +172,7 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
@@ -185,7 +186,7 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
@@ -198,7 +199,7 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
@@ -211,7 +212,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
@@ -240,7 +241,7 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = mm->cpu_vm_mask;
+	cpumask_t cpu_mask = *mm_cpumask(mm);
 	cpu_clear(smp_processor_id(), cpu_mask);
 	if (!cpus_empty(cpu_mask))
 		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
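
[Note: the smp_32.c hunks above all follow the same idiom: take a private copy of the mm's CPU mask (now via *mm_cpumask(mm)), drop the current CPU from that copy, and cross-call the remaining CPUs only if any are left. The snippet below is a toy, user-space model of that copy-and-clear logic using the same invented toy_cpumask stand-in as earlier; cpu_clear()/cpus_empty() and the xc*() cross-call helpers are only mimicked, not the real kernel API.]

/* Toy, user-space model of the "copy mask, clear self, cross-call the rest" idiom. */
#include <stdio.h>

struct toy_cpumask { unsigned long bits; };

static void toy_clear_cpu(int cpu, struct toy_cpumask *m)	/* ~ cpu_clear() */
{
	m->bits &= ~(1UL << cpu);
}

static int toy_empty(const struct toy_cpumask *m)		/* ~ cpus_empty() */
{
	return m->bits == 0;
}

int main(void)
{
	struct toy_cpumask mm_mask = { 0x5 };	/* CPUs 0 and 2 have used this mm */
	int self = 2;				/* pretend smp_processor_id() == 2 */

	struct toy_cpumask cpu_mask = mm_mask;	/* ~ cpumask_t cpu_mask = *mm_cpumask(mm) */
	toy_clear_cpu(self, &cpu_mask);
	if (!toy_empty(&cpu_mask))
		printf("would cross-call remote CPUs (mask 0x%lx)\n", cpu_mask.bits);
	printf("then flush locally on cpu %d\n", self);
	return 0;
}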
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4e17eec41478..2de937c7232e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -850,7 +850,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	smp_call_function_many(&mm->cpu_vm_mask, tsb_sync, mm, 1);
+	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
@@ -1055,13 +1055,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	int cpu = get_cpu();
 
 	if (atomic_read(&mm->mm_users) == 1) {
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 		goto local_flush_and_out;
 	}
 
 	smp_cross_call_masked(&xcall_flush_tlb_mm,
 			      ctx, 0, 0,
-			      &mm->cpu_vm_mask);
+			      mm_cpumask(mm));
 
 local_flush_and_out:
 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1075,11 +1075,11 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	int cpu = get_cpu();
 
 	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
-		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
 				      ctx, nr, (unsigned long) vaddrs,
-				      &mm->cpu_vm_mask);
+				      mm_cpumask(mm));
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 