Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/mmu_context.h |  6
-rw-r--r--  arch/x86/include/asm/smp.h         |  1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c     |  7
-rw-r--r--  arch/x86/kernel/ldt.c              |  4
-rw-r--r--  arch/x86/kernel/process.c          |  6
-rw-r--r--  arch/x86/kernel/smpboot.c          |  9
-rw-r--r--  arch/x86/kernel/time.c             |  1
-rw-r--r--  arch/x86/mm/tlb.c                  | 15
-rw-r--r--  arch/x86/xen/mmu.c                 |  4
9 files changed, 22 insertions, 31 deletions
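Note: the whole series is a mechanical cpumask-API conversion. Direct pokes at
mm->cpu_vm_mask become calls through the mm_cpumask() accessor, open-coded
cpu_*/cpus_* operations become their cpumask_* equivalents (which take
struct cpumask pointers), and alloc_cpumask_var()-plus-cpumask_clear() pairs
collapse into zalloc_cpumask_var*(). A minimal sketch of the core pattern,
assuming kernel context; the helper below is hypothetical and appears nowhere
in the patch:

    #include <linux/cpumask.h>
    #include <linux/mm_types.h>

    /* Hypothetical helper illustrating the conversion, not from this diff. */
    static void track_mm_on_cpu(struct mm_struct *mm, unsigned int cpu, bool on)
    {
            if (on)
                    /* was: cpu_set(cpu, mm->cpu_vm_mask); */
                    cpumask_set_cpu(cpu, mm_cpumask(mm));
            else
                    /* was: cpu_clear(cpu, mm->cpu_vm_mask); */
                    cpumask_clear_cpu(cpu, mm_cpumask(mm));
    }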
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
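The switch_mm() hunks change only how the mask is reached, not the logic:
mm_cpumask() hands back a struct cpumask * into the mm, so the cpumask_*
operations keep working even if the underlying member later moves off the
structure. At the time of this series the accessor was roughly the following
(quoted from memory, so treat the exact form as an assumption):

    /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
    #define mm_cpumask(mm) (&(mm)->cpu_vm_mask)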
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f2..dc69f28489f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 					  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
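zalloc_cpumask_var_node() returns the mask already zeroed, which is what lets
the trailing else branch with its two cpumask_clear() calls disappear; the
same allocate-then-clear collapse recurs in process.c and smpboot.c below.
The clearing is not optional: with CONFIG_CPUMASK_OFFSTACK the variable is a
real heap allocation whose initial contents are undefined. A sketch of the
resulting error-unwind shape, with hypothetical struct and function names:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct two_masks {              /* hypothetical, for illustration */
            cpumask_var_t domain, old_domain;
    };

    static struct two_masks *two_masks_alloc(int node)
    {
            struct two_masks *m = kzalloc_node(sizeof(*m), GFP_ATOMIC, node);

            if (!m)
                    return NULL;
            /* the z-variants allocate and zero in one step */
            if (!zalloc_cpumask_var_node(&m->domain, GFP_ATOMIC, node))
                    goto free_m;
            if (!zalloc_cpumask_var_node(&m->old_domain, GFP_ATOMIC, node))
                    goto free_domain;
            return m;

    free_domain:
            free_cpumask_var(m->domain);
    free_m:
            kfree(m);
            return NULL;
    }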
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d..ec6ef60cbd17 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 	preempt_disable();
 	load_LDT(pc);
-	if (!cpus_equal(current->mm->cpu_vm_mask,
-			cpumask_of_cpu(smp_processor_id())))
+	if (!cpumask_equal(mm_cpumask(current->mm),
+			   cpumask_of(smp_processor_id())))
 		smp_call_function(flush_ldt, current->mm, 1);
 	preempt_enable();
 #else
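The ldt.c hunk also shows the by-value-to-by-pointer shift: cpumask_of_cpu()
yielded a whole cpumask_t on the stack, while cpumask_of() returns a
const struct cpumask *, so the comparison no longer copies NR_CPUS bits
around. The same test as a hypothetical helper (the caller must hold
preemption off, as alloc_ldt() does, for smp_processor_id() to be stable):

    /* Hypothetical: does any CPU besides this one have mm loaded? */
    static bool ldt_flush_ipi_needed(struct mm_struct *mm)
    {
            return !cpumask_equal(mm_cpumask(mm),
                                  cpumask_of(smp_processor_id()));
    }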
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab4160315..5284cd2b5776 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff7..565ebc65920e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e293ac56c723..dcb00d278512 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -93,7 +93,6 @@ static struct irqaction irq0 = {
 
 void __init setup_default_timer_irq(void)
 {
-	irq0.mask = cpumask_of_cpu(0);
 	setup_irq(0, &irq0);
 }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f0..36fe08eeb5c3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 
 	preempt_enable();
 }
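The recurring guard in tlb.c relies on cpumask_any_but() returning nr_cpu_ids
when the mask holds no CPU other than the excluded one, so the "< nr_cpu_ids"
comparison asks "does anyone else have this mm live?" before paying for an
IPI-based flush_tlb_others(). The idiom spelled out as a hypothetical wrapper,
not part of the patch:

    /* Hypothetical: true if a CPU other than 'cpu' is set in mm's mask,
     * i.e. may be caching translations for this mm. cpumask_any_but()
     * returns nr_cpu_ids when no such CPU exists. */
    static inline bool mm_used_elsewhere(struct mm_struct *mm, unsigned int cpu)
    {
            return cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids;
    }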
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 093dd59b5385..3bf7b1d250ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
 		for_each_online_cpu(cpu) {
-			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
 			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 		}
 		return;
 	}
-	cpumask_copy(mask, &mm->cpu_vm_mask);
+	cpumask_copy(mask, mm_cpumask(mm));
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
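xen_drop_mm_ref() keeps its two-tier shape through the conversion: with
CONFIG_CPUMASK_OFFSTACK, alloc_cpumask_var(..., GFP_ATOMIC) can fail, and the
fallback then pessimistically queries every online CPU instead of a snapshot
of the mm's users; the hunks only swap &mm->cpu_vm_mask for mm_cpumask(mm).
The pattern in isolation, with hypothetical names:

    #include <linux/cpumask.h>

    /* Hypothetical sketch of the allocate-or-degrade pattern. */
    static void visit_mm_users(struct mm_struct *mm, void (*visit)(int cpu))
    {
            cpumask_var_t mask;
            int cpu;

            if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
                    /* no memory for a snapshot: conservatively visit everyone */
                    for_each_online_cpu(cpu)
                            visit(cpu);
                    return;
            }
            cpumask_copy(mask, mm_cpumask(mm));     /* snapshot the users */
            for_each_cpu(cpu, mask)
                    visit(cpu);
            free_cpumask_var(mask);
    }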