author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-11 14:52:22 -0500
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-02-11 14:52:22 -0500
commit		9049a11de73d3ecc623f1903100d099f82ede56c (patch)
tree		c03d130d58168e337a66fe999682452b7a02b42b /arch/x86/xen/smp.c
parent		c47c1b1f3a9d6973108020df1dcab7604f7774dd (diff)
parent		e4d0407185cdbdcfd99fc23bde2e5454bbc46329 (diff)
Merge commit 'remotes/tip/x86/paravirt' into x86/untangle2
* commit 'remotes/tip/x86/paravirt': (175 commits)
xen: use direct ops on 64-bit
xen: make direct versions of irq_enable/disable/save/restore to common code
xen: setup percpu data pointers
xen: fix 32-bit build resulting from mmu move
x86/paravirt: return full 64-bit result
x86, percpu: fix kexec with vmlinux
x86/vmi: fix interrupt enable/disable/save/restore calling convention.
x86/paravirt: don't restore second return reg
xen: setup percpu data pointers
x86: split loading percpu segments from loading gdt
x86: pass in cpu number to switch_to_new_gdt()
x86: UV fix uv_flush_send_and_wait()
x86/paravirt: fix missing callee-save call on pud_val
x86/paravirt: use callee-saved convention for pte_val/make_pte/etc
x86/paravirt: implement PVOP_CALL macros for callee-save functions
x86/paravirt: add register-saving thunks to reduce caller register pressure
x86/paravirt: selectively save/restore regs around pvops calls
x86: fix paravirt clobber in entry_64.S
x86/pvops: add a paravirt_ident functions to allow special patching
xen: move remaining mmu-related stuff into mmu.c
...
Conflicts:
arch/x86/mach-voyager/voyager_smp.c
arch/x86/mm/fault.c
Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--	arch/x86/xen/smp.c	41
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..035582ae815d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-        __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-        add_pda(irq_resched_count, 1);
-#endif
+        inc_irq_stat(irq_resched_count);
 
         return IRQ_HANDLED;
 }
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
         xen_setup_cpu_clockevents();
 
         cpu_set(cpu, cpu_online_map);
-        x86_write_percpu(cpu_state, CPU_ONLINE);
+        percpu_write(cpu_state, CPU_ONLINE);
         wmb();
 
         /* We can take interrupts now: we're officially "up". */
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
         /* We've switched to the "real" per-cpu gdt, so make sure the
            old memory can be recycled */
-        make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+        make_lowmem_page_readwrite(xen_initial_gdt);
 
         xen_setup_vcpu_info_placement();
 }
@@ -239,6 +235,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
         ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
         ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+        ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
         ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
         ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
@@ -283,23 +281,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
         struct task_struct *idle = idle_task(cpu);
         int rc;
 
-#ifdef CONFIG_X86_64
-        /* Allocate node local memory for AP pdas */
-        WARN_ON(cpu == 0);
-        if (cpu > 0) {
-                rc = get_local_pda(cpu);
-                if (rc)
-                        return rc;
-        }
-#endif
-
-#ifdef CONFIG_X86_32
-        init_gdt(cpu);
         per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
         irq_ctx_init(cpu);
 #else
-        cpu_pda(cpu)->pcurrent = idle;
         clear_tsk_thread_flag(idle, TIF_FORK);
+        per_cpu(kernel_stack, cpu) =
+                (unsigned long)task_stack_page(idle) -
+                KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
         xen_setup_timer(cpu);
         xen_init_lock_cpu(cpu);
@@ -445,11 +434,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
         irq_enter();
         generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-        __get_cpu_var(irq_stat).irq_call_count++;
-#else
-        add_pda(irq_call_count, 1);
-#endif
+        inc_irq_stat(irq_call_count);
         irq_exit();
 
         return IRQ_HANDLED;
@@ -459,11 +444,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
         irq_enter();
         generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-        __get_cpu_var(irq_stat).irq_call_count++;
-#else
-        add_pda(irq_call_count, 1);
-#endif
+        inc_irq_stat(irq_call_count);
         irq_exit();
 
         return IRQ_HANDLED;