Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--	arch/x86/xen/smp.c	| 53
1 file changed, 19 insertions(+), 34 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..585a6e330837 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 
 	return IRQ_HANDLED;
 }
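Note: the removed lines open-coded the per-CPU reschedule counter behind a CONFIG_X86_32 test, with the 64-bit side going through the soon-to-be-removed PDA. inc_irq_stat() hides that split at the call site. A simplified sketch of the per-width definitions of this era (paraphrased from arch/x86/include/asm/hardirq_{32,64}.h, not the exact source):

	#ifdef CONFIG_X86_32
	# define inc_irq_stat(member)	(__get_cpu_var(irq_stat).member++)
	#else
	# define inc_irq_stat(member)	add_pda(member, 1)
	#endif

Once the PDA is folded into the per-cpu area, both arms collapse into a single per-cpu increment; routing callers through the macro now makes that later change invisible here.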
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 	xen_setup_cpu_clockevents();
 
 	cpu_set(cpu, cpu_online_map);
-	x86_write_percpu(cpu_state, CPU_ONLINE);
+	percpu_write(cpu_state, CPU_ONLINE);
 	wmb();
 
 	/* We can take interrupts now: we're officially "up". */
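Note: x86_write_percpu() was the old x86-specific helper; percpu_write() is its renamed form from the unified x86 per-cpu code, compiling to a single %fs/%gs-relative store. As a sketch of the semantics only (the real definition is an inline-asm per-cpu op, not this):

	/* percpu_write(cpu_state, CPU_ONLINE) behaves like: */
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;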
@@ -162,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 		}
 	}
 }
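Note: writing cpu_possible_map directly is replaced by the accessor, which lets the mask itself become private to the core CPU code. Roughly what set_cpu_possible() does in kernel/cpu.c at this point (a sketch, not verbatim):

	void set_cpu_possible(unsigned int cpu, bool possible)
	{
		if (possible)
			cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
		else
			cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
	}

The set_cpu_possible(cpu, false) and set_cpu_present(cpu, true) conversions in the later hunks are the same pattern.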
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+	make_lowmem_page_readwrite(xen_initial_gdt);
 
 	xen_setup_vcpu_info_placement();
 }
@@ -201,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
-		cpu_clear(cpu, cpu_possible_map);
+		set_cpu_possible(cpu, false);
 	}
 
 	for_each_possible_cpu (cpu) {
@@ -214,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		if (IS_ERR(idle))
 			panic("failed fork for CPU %d", cpu);
 
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 	}
 }
 
@@ -223,6 +219,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
+	unsigned long gdt_mfn;
 
 	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
@@ -239,6 +236,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
 	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
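Note: this added #else arm is the heart of the 64-bit conversion. With the PDA gone, x86-64 per-cpu variables are addressed off the kernel %gs base, so the new vCPU must start with %gs already mapping its per-cpu area; gs_base_kernel in Xen's vcpu_guest_context plays the role that loading __KERNEL_PERCPU into %fs plays on 32-bit. per_cpu_offset() is the generic lookup, roughly:

	/* sketch: on SMP x86, per_cpu_offset(cpu) is a table lookup */
	#define per_cpu_offset(x)	(__per_cpu_offset[x])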
@@ -250,9 +249,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->ldt_ents = 0;
 
 	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+	gdt_mfn = arbitrary_virt_to_mfn(gdt);
 	make_lowmem_page_readonly(gdt);
+	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+	ctxt->gdt_frames[0] = gdt_mfn;
 	ctxt->gdt_ents      = GDT_ENTRIES;
 
 	ctxt->user_regs.cs = __KERNEL_CS;
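Note: virt_to_mfn() only works for direct-mapped (lowmem) addresses, but with the new per-cpu allocator the GDT can sit at a vmalloc-style address, so the machine frame is now found by walking the page tables via arbitrary_virt_to_mfn(). Xen will not install a GDT frame that remains writable through any mapping, which is presumably why the frame is made read-only through both of its virtual aliases:

	gdt_mfn = arbitrary_virt_to_mfn(gdt);	/* page-table walk, any mapping */
	make_lowmem_page_readonly(gdt);		/* the alias the kernel uses */
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));	/* linear-map alias */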
@@ -283,23 +285,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#ifdef CONFIG_X86_64
-	/* Allocate node local memory for AP pdas */
-	WARN_ON(cpu == 0);
-	if (cpu > 0) {
-		rc = get_local_pda(cpu);
-		if (rc)
-			return rc;
-	}
-#endif
-
-#ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
 	irq_ctx_init(cpu);
 #else
-	cpu_pda(cpu)->pcurrent = idle;
 	clear_tsk_thread_flag(idle, TIF_FORK);
+	per_cpu(kernel_stack, cpu) =
+		(unsigned long)task_stack_page(idle) -
+		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
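Note: the removed block was the pre-unification bring-up dance: allocate a node-local PDA for the AP on 64-bit (get_local_pda()) and call init_gdt() on 32-bit, with current_task stored in two different places. Now current_task is a plain per-cpu variable on both widths, and the remaining 64-bit step is seeding kernel_stack, the per-cpu value the SYSCALL entry path loads as the kernel %rsp (replacing the old pda->kernelstack). A sketch of the arithmetic, assuming the stack grows down from the top of the task's stack allocation:

	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle)	/* allocation base */
		+ THREAD_SIZE				/* one past the top */
		- KERNEL_STACK_OFFSET;			/* reserved scratch gap */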
@@ -445,11 +438,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -459,11 +448,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;