Diffstat (limited to 'arch/mn10300/mm/tlb-smp.c')
 arch/mn10300/mm/tlb-smp.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
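The patch is a mechanical conversion of this file from the old by-value cpumask operators to the pointer-based cpumask_*() API, plus a switch from direct mm->cpu_vm_mask access to the mm_cpumask() accessor. Each old helper maps one-for-one onto a new one; the sketch below lists the pairs used in the hunks that follow (the demo function itself is hypothetical, written only to show the mapping, and assumes preemption is disabled so smp_processor_id() is stable):

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical demo, not part of the patch: each old by-value operator
 * used in this file next to its pointer-taking replacement. */
static void cpumask_api_mapping_demo(struct mm_struct *mm)
{
	cpumask_t mask, tmp;
	bool set, none, equal;
	int cpu = smp_processor_id();

	cpumask_copy(&mask, mm_cpumask(mm));       /* was: mask = mm->cpu_vm_mask;  */
	set = cpumask_test_cpu(cpu, &mask);        /* was: cpu_isset(cpu, mask)     */
	cpumask_clear_cpu(cpu, &mask);             /* was: cpu_clear(cpu, mask)     */
	none = cpumask_empty(&mask);               /* was: cpus_empty(mask)         */
	cpumask_and(&tmp, &mask, cpu_online_mask); /* was: cpus_and(tmp, mask, cpu_online_map) */
	equal = cpumask_equal(&mask, &tmp);        /* was: cpus_equal(mask, tmp)    */
	(void)set; (void)none; (void)equal;
}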
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 0b6a5ad1960e..9a777498a916 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -64,7 +64,7 @@ void smp_flush_tlb(void *unused)
 
 	cpu_id = get_cpu();
 
-	if (!cpu_isset(cpu_id, flush_cpumask))
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
 		/* This was a BUG() but until someone can quote me the line
 		 * from the intel manual that guarantees an IPI to multiple
 		 * CPUs is retried _only_ on the erroring CPUs its staying as a
@@ -80,7 +80,7 @@ void smp_flush_tlb(void *unused)
 	local_flush_tlb_page(flush_mm, flush_va);
 
 	smp_mb__before_clear_bit();
-	cpu_clear(cpu_id, flush_cpumask);
+	cpumask_clear_cpu(cpu_id, &flush_cpumask);
 	smp_mb__after_clear_bit();
 out:
 	put_cpu();
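Taken together, the two hunks above leave the IPI handler looking roughly as follows. This is a sketch, not the verbatim function: the FLUSH_ALL branch is inferred (only the local_flush_tlb_page() arm is visible in the hunks), and the long comment before the goto is elided.

void smp_flush_tlb(void *unused)
{
	int cpu_id = get_cpu();

	/* Only act if the sender put this CPU in the shared mask. */
	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		goto out;

	if (flush_va == FLUSH_ALL)		/* inferred branch */
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

	/* Clearing our bit signals completion to the spinning sender;
	 * the barrier pair orders the flush against the bit-clear. */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu();
}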
@@ -103,11 +103,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * - we do not send IPIs to as-yet unbooted CPUs.
 	 */
 	BUG_ON(!mm);
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(&cpumask));
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, &cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(&cpumask, &tmp));
 
 	/* I'm not happy about this global shared spinlock in the MM hot path,
 	 * but we'll see how contended it is.
@@ -128,7 +128,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
 	smp_call_function(smp_flush_tlb, NULL, 1);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpumask_empty(&flush_cpumask))
 		/* Lockup detection does not belong here */
 		smp_mb();
 
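On the sender side, the two flush_tlb_others() hunks bracket a simple handshake: validate the target mask, publish it in flush_cpumask, fire the IPI, then spin until every target clears its bit. A condensed sketch follows; the way flush_mm, flush_va and flush_cpumask are published is an assumption, since that step is not visible in these hunks (the real function also holds a global spinlock around it, per the comment at line 112).

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* The callers guarantee a sane, online, remote-only target set. */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* Assumed publication step (elided in the hunks above). */
	flush_mm = mm;
	flush_va = va;
	cpumask_copy(&flush_cpumask, &cpumask);

	smp_call_function(smp_flush_tlb, NULL, 1);

	/* Each target clears its own bit when done; spin until all have. */
	while (!cpumask_empty(&flush_cpumask))
		smp_mb();
}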
@@ -146,11 +146,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -165,11 +165,11 @@ void flush_tlb_current_task(void)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -186,11 +186,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb_page(mm, va);
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
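The last three hunks are the same caller-side idiom three times over: snapshot the mm's CPU set with mm_cpumask(), drop the local CPU (it is flushed directly), and IPI the remainder only if any remote CPU is left. Isolated as a sketch, with flush_tlb_mm() as the representative and the comments being editorial additions:

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	/* mm_cpumask() replaces direct mm->cpu_vm_mask access. */
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	/* The local CPU is handled by local_flush_tlb() below. */
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	local_flush_tlb();
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}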