Diffstat (limited to 'arch/i386')
-rw-r--r--   arch/i386/xen/Kconfig       2
-rw-r--r--   arch/i386/xen/Makefile      2
-rw-r--r--   arch/i386/xen/enlighten.c   115
-rw-r--r--   arch/i386/xen/events.c      80
-rw-r--r--   arch/i386/xen/mmu.c         69
-rw-r--r--   arch/i386/xen/mmu.h         13
-rw-r--r--   arch/i386/xen/setup.c       5
-rw-r--r--   arch/i386/xen/smp.c         407
-rw-r--r--   arch/i386/xen/time.c        13
-rw-r--r--   arch/i386/xen/xen-ops.h     25
10 files changed, 682 insertions, 49 deletions
diff --git a/arch/i386/xen/Kconfig b/arch/i386/xen/Kconfig
index 7c5550058c15..b7697ff22361 100644
--- a/arch/i386/xen/Kconfig
+++ b/arch/i386/xen/Kconfig
@@ -4,7 +4,7 @@
 
 config XEN
 	bool "Enable support for Xen hypervisor"
-	depends on PARAVIRT && X86_CMPXCHG && X86_TSC && !(PREEMPT || SMP || NEED_MULTIPLE_NODES)
+	depends on PARAVIRT && X86_CMPXCHG && X86_TSC && !(PREEMPT || NEED_MULTIPLE_NODES)
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the
diff --git a/arch/i386/xen/Makefile b/arch/i386/xen/Makefile
index bf51cabed0d2..fd05f243a3f8 100644
--- a/arch/i386/xen/Makefile
+++ b/arch/i386/xen/Makefile
@@ -1,2 +1,4 @@
 obj-y		:= enlighten.o setup.o features.o multicalls.o mmu.o \
 			events.o time.o
+
+obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index a9ba834295a2..de62d66e0893 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
+#include <linux/smp.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
@@ -40,6 +41,7 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -56,7 +58,7 @@ DEFINE_PER_CPU(unsigned long, xen_cr3);
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
-static void xen_vcpu_setup(int cpu)
+void xen_vcpu_setup(int cpu)
 {
 	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 }
@@ -347,23 +349,14 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
 	}
 }
 
-/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
-   hold a spinlock to protect the static traps[] array (static because
-   it avoids allocation, and saves stack space). */
-static void xen_load_idt(const struct Xgt_desc_struct *desc)
+static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+				  struct trap_info *traps)
 {
-	static DEFINE_SPINLOCK(lock);
-	static struct trap_info traps[257];
-
-	int cpu = smp_processor_id();
 	unsigned in, out, count;
 
-	per_cpu(idt_desc, cpu) = *desc;
-
 	count = (desc->size+1) / 8;
 	BUG_ON(count > 256);
 
-	spin_lock(&lock);
 	for (in = out = 0; in < count; in++) {
 		const u32 *entry = (u32 *)(desc->address + in * 8);
 
@@ -371,6 +364,31 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
 		out++;
 	}
 	traps[out].address = 0;
+}
+
+void xen_copy_trap_info(struct trap_info *traps)
+{
+	const struct Xgt_desc_struct *desc = &get_cpu_var(idt_desc);
+
+	xen_convert_trap_info(desc, traps);
+
+	put_cpu_var(idt_desc);
+}
+
+/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
+   hold a spinlock to protect the static traps[] array (static because
+   it avoids allocation, and saves stack space). */
+static void xen_load_idt(const struct Xgt_desc_struct *desc)
+{
+	static DEFINE_SPINLOCK(lock);
+	static struct trap_info traps[257];
+	int cpu = smp_processor_id();
+
+	per_cpu(idt_desc, cpu) = *desc;
+
+	spin_lock(&lock);
+
+	xen_convert_trap_info(desc, traps);
 
 	xen_mc_flush();
 	if (HYPERVISOR_set_trap_table(traps))
@@ -428,6 +446,12 @@ static unsigned long xen_apic_read(unsigned long reg)
 {
 	return 0;
 }
+
+static void xen_apic_write(unsigned long reg, unsigned long val)
+{
+	/* Warn to see if there's any stray references */
+	WARN_ON(1);
+}
 #endif
 
 static void xen_flush_tlb(void)
@@ -449,6 +473,40 @@ static void xen_flush_tlb_single(unsigned long addr)
 	BUG();
 }
 
+static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
+				 unsigned long va)
+{
+	struct mmuext_op op;
+	cpumask_t cpumask = *cpus;
+
+	/*
+	 * A couple of (to be removed) sanity checks:
+	 *
+	 * - current CPU must not be in mask
+	 * - mask must exist :)
+	 */
+	BUG_ON(cpus_empty(cpumask));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(!mm);
+
+	/* If a CPU which we ran on has gone down, OK. */
+	cpus_and(cpumask, cpumask, cpu_online_map);
+	if (cpus_empty(cpumask))
+		return;
+
+	if (va == TLB_FLUSH_ALL) {
+		op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+		op.arg2.vcpumask = (void *)cpus;
+	} else {
+		op.cmd = MMUEXT_INVLPG_MULTI;
+		op.arg1.linear_addr = va;
+		op.arg2.vcpumask = (void *)cpus;
+	}
+
+	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+		BUG();
+}
+
 static unsigned long xen_read_cr2(void)
 {
 	return x86_read_percpu(xen_vcpu)->arch.cr2;
@@ -460,18 +518,6 @@ static void xen_write_cr4(unsigned long cr4)
 	native_write_cr4(cr4 & ~X86_CR4_TSD);
 }
 
-/*
- * Page-directory addresses above 4GB do not fit into architectural %cr3.
- * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
- * must use the following accessor macros to pack/unpack valid MFNs.
- *
- * Note that Xen is using the fact that the pagetable base is always
- * page-aligned, and putting the 12 MSB of the address into the 12 LSB
- * of cr3.
- */
-#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
-#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
-
 static unsigned long xen_read_cr3(void)
 {
 	return x86_read_percpu(xen_cr3);
@@ -740,8 +786,8 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.io_delay = xen_io_delay,
 
 #ifdef CONFIG_X86_LOCAL_APIC
-	.apic_write = paravirt_nop,
-	.apic_write_atomic = paravirt_nop,
+	.apic_write = xen_apic_write,
+	.apic_write_atomic = xen_apic_write,
 	.apic_read = xen_apic_read,
 	.setup_boot_clock = paravirt_nop,
 	.setup_secondary_clock = paravirt_nop,
@@ -751,6 +797,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
 	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pte_update = paravirt_nop,
 	.pte_update_defer = paravirt_nop,
@@ -796,6 +843,19 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
 	.set_lazy_mode = xen_set_lazy_mode,
 };
 
+#ifdef CONFIG_SMP
+static const struct smp_ops xen_smp_ops __initdata = {
+	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = xen_smp_prepare_cpus,
+	.cpu_up = xen_cpu_up,
+	.smp_cpus_done = xen_smp_cpus_done,
+
+	.smp_send_stop = xen_smp_send_stop,
+	.smp_send_reschedule = xen_smp_send_reschedule,
+	.smp_call_function_mask = xen_smp_call_function_mask,
+};
+#endif	/* CONFIG_SMP */
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -808,6 +868,9 @@ asmlinkage void __init xen_start_kernel(void)
 
 	/* Install Xen paravirt ops */
 	paravirt_ops = xen_paravirt_ops;
+#ifdef CONFIG_SMP
+	smp_ops = xen_smp_ops;
+#endif
 
 	xen_setup_features();
 
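Note: the structural change above is that xen_start_kernel now installs a second ops table, smp_ops, next to paravirt_ops, so every SMP primitive becomes an indirect call that Xen can redirect. A minimal user-space sketch of that dispatch pattern (the struct and function names below are illustrative stand-ins, not the kernel's):

	#include <stdio.h>

	/* Illustrative stand-in for the kernel's smp_ops table. */
	struct demo_smp_ops {
		void (*send_reschedule)(int cpu);
	};

	static void native_send_reschedule(int cpu)
	{
		printf("native: APIC IPI to cpu %d\n", cpu);
	}

	static void xen_demo_send_reschedule(int cpu)
	{
		printf("xen: event-channel notify to cpu %d\n", cpu);
	}

	static struct demo_smp_ops demo_ops = { native_send_reschedule };

	int main(void)
	{
		demo_ops.send_reschedule(1);	/* default: native path */
		demo_ops.send_reschedule = xen_demo_send_reschedule;
		demo_ops.send_reschedule(1);	/* after the boot-time swap */
		return 0;
	}

The kernel-side equivalent of the reassignment is the smp_ops = xen_smp_ops line in the hunk above.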
diff --git a/arch/i386/xen/events.c b/arch/i386/xen/events.c
index e7c5d00ab4fe..4103b8bf22fd 100644
--- a/arch/i386/xen/events.c
+++ b/arch/i386/xen/events.c
@@ -47,6 +47,9 @@ static DEFINE_SPINLOCK(irq_mapping_update_lock);
 /* IRQ <-> VIRQ mapping. */
 static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 
+/* IRQ <-> IPI mapping */
+static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 struct packed_irq
 {
@@ -58,7 +61,13 @@ struct packed_irq
 static struct packed_irq irq_info[NR_IRQS];
 
 /* Binding types. */
-enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
+enum {
+	IRQT_UNBOUND,
+	IRQT_PIRQ,
+	IRQT_VIRQ,
+	IRQT_IPI,
+	IRQT_EVTCHN
+};
 
 /* Convenient shorthand for packed representation of an unbound IRQ. */
 #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
@@ -261,6 +270,45 @@ static int bind_evtchn_to_irq(unsigned int evtchn)
 	return irq;
 }
 
+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int evtchn, irq;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	irq = per_cpu(ipi_to_irq, cpu)[ipi];
+	if (irq == -1) {
+		irq = find_unbound_irq();
+		if (irq < 0)
+			goto out;
+
+		dynamic_irq_init(irq);
+		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+					      handle_level_irq, "ipi");
+
+		bind_ipi.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+						&bind_ipi) != 0)
+			BUG();
+		evtchn = bind_ipi.port;
+
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+
+		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+		bind_evtchn_to_cpu(evtchn, cpu);
+	}
+
+	irq_bindcount[irq]++;
+
+ out:
+	spin_unlock(&irq_mapping_update_lock);
+	return irq;
+}
+
+
 static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 {
 	struct evtchn_bind_virq bind_virq;
@@ -369,6 +417,28 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
+int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+			   unsigned int cpu,
+			   irq_handler_t handler,
+			   unsigned long irqflags,
+			   const char *devname,
+			   void *dev_id)
+{
+	int irq, retval;
+
+	irq = bind_ipi_to_irq(ipi, cpu);
+	if (irq < 0)
+		return irq;
+
+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+	if (retval != 0) {
+		unbind_from_irq(irq);
+		return retval;
+	}
+
+	return irq;
+}
+
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
 	free_irq(irq, dev_id);
@@ -376,6 +446,14 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
+void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+{
+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
+	BUG_ON(irq < 0);
+	notify_remote_via_irq(irq);
+}
+
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
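Note: bind_ipi_to_irq above is a bind-once-then-refcount pattern: the first binder for a given (cpu, ipi) pair allocates the irq and event channel under the lock, and every caller bumps irq_bindcount. A user-space sketch of just that shape (all names and sizes below are illustrative):

	#include <stdio.h>

	#define DEMO_NR_IPIS 2

	static int ipi_to_irq[DEMO_NR_IPIS] = { -1, -1 };
	static int irq_bindcount[16];
	static int next_irq;

	static int demo_bind_ipi(int ipi)
	{
		int irq = ipi_to_irq[ipi];

		if (irq == -1) {		/* first user: create the binding */
			irq = next_irq++;
			ipi_to_irq[ipi] = irq;
		}
		irq_bindcount[irq]++;		/* every user: take a reference */
		return irq;
	}

	int main(void)
	{
		int irq = demo_bind_ipi(0);

		printf("first bind:  irq %d (refcount %d)\n", irq, irq_bindcount[irq]);
		irq = demo_bind_ipi(0);
		printf("second bind: irq %d (refcount %d)\n", irq, irq_bindcount[irq]);
		return 0;
	}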
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
index 53501ce2d15c..bc49ef846203 100644
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -391,8 +391,12 @@ void xen_pgd_pin(pgd_t *pgd)
 
 	xen_mc_batch();
 
-	if (pgd_walk(pgd, pin_page, TASK_SIZE))
+	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
+		/* re-enable interrupts for kmap_flush_unused */
+		xen_mc_issue(0);
 		kmap_flush_unused();
+		xen_mc_batch();
+	}
 
 	mcs = __xen_mc_entry(sizeof(*op));
 	op = mcs.args;
@@ -474,27 +478,58 @@ void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
-void xen_exit_mmap(struct mm_struct *mm)
-{
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-
-	/*
-	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-	 */
-	if (tsk->active_mm == mm) {
-		tsk->active_mm = &init_mm;
-		atomic_inc(&init_mm.mm_count);
-
-		switch_mm(mm, &init_mm, tsk);
-
-		atomic_dec(&mm->mm_count);
-		BUG_ON(atomic_read(&mm->mm_count) == 0);
-	}
-
-	task_unlock(tsk);
+
+#ifdef CONFIG_SMP
+/* Another cpu may still have their %cr3 pointing at the pagetable, so
+   we need to repoint it somewhere else before we can unpin it. */
+static void drop_other_mm_ref(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
+		leave_mm(smp_processor_id());
+}
+
+static void drop_mm_ref(struct mm_struct *mm)
+{
+	if (current->active_mm == mm) {
+		if (current->mm == mm)
+			load_cr3(swapper_pg_dir);
+		else
+			leave_mm(smp_processor_id());
+	}
+
+	if (!cpus_empty(mm->cpu_vm_mask))
+		xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
+					   mm, 1);
+}
+#else
+static void drop_mm_ref(struct mm_struct *mm)
+{
+	if (current->active_mm == mm)
+		load_cr3(swapper_pg_dir);
+}
+#endif
+
+/*
+ * While a process runs, Xen pins its pagetables, which means that the
+ * hypervisor forces it to be read-only, and it controls all updates
+ * to it.  This means that all pagetable updates have to go via the
+ * hypervisor, which is moderately expensive.
+ *
+ * Since we're pulling the pagetable down, we switch to use init_mm,
+ * unpin old process pagetable and mark it all read-write, which
+ * allows further operations on it to be simple memory accesses.
+ *
+ * The only subtle point is that another CPU may be still using the
+ * pagetable because of lazy tlb flushing.  This means we need to
+ * switch all CPUs off this pagetable before we can unpin it.
+ */
+void xen_exit_mmap(struct mm_struct *mm)
+{
+	get_cpu();		/* make sure we don't move around */
+	drop_mm_ref(mm);
+	put_cpu();
 
 	xen_pgd_unpin(mm->pgd);
 }
diff --git a/arch/i386/xen/mmu.h b/arch/i386/xen/mmu.h
index 49776fe9f02a..c9ff27f3ac3a 100644
--- a/arch/i386/xen/mmu.h
+++ b/arch/i386/xen/mmu.h
@@ -3,6 +3,19 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ *
+ * Note that Xen is using the fact that the pagetable base is always
+ * page-aligned, and putting the 12 MSB of the address into the 12 LSB
+ * of cr3.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
+
+
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
 void xen_set_pte(pte_t *ptep, pte_t pteval);
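Note: the xen_pfn_to_cr3/xen_cr3_to_pfn macros moved into mmu.h are pure bit-shuffling and can be checked in isolation; a user-space round-trip demo (the frame number is an arbitrary example):

	#include <assert.h>
	#include <stdio.h>

	#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
	#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

	int main(void)
	{
		unsigned mfn = 0x123456;	/* frame number wider than 20 bits */
		unsigned cr3 = xen_pfn_to_cr3(mfn);

		/* 0x123456 packs to 0x23456001: the MFN's top bits land in the
		   low 12 bits of cr3, which are free since the pagetable base
		   is page-aligned. */
		printf("mfn %#x -> cr3 %#x -> mfn %#x\n", mfn, cr3, xen_cr3_to_pfn(cr3));
		assert(xen_cr3_to_pfn(cr3) == mfn);
		return 0;
	}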
diff --git a/arch/i386/xen/setup.c b/arch/i386/xen/setup.c
index 7da93ee612f6..18a994d5a4c5 100644
--- a/arch/i386/xen/setup.c
+++ b/arch/i386/xen/setup.c
@@ -94,4 +94,9 @@ void __init xen_arch_setup(void)
 				      COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
 
 	pm_idle = xen_idle;
+
+#ifdef CONFIG_SMP
+	/* fill cpus_possible with all available cpus */
+	xen_fill_possible_map();
+#endif
 }
diff --git a/arch/i386/xen/smp.c b/arch/i386/xen/smp.c
new file mode 100644
index 000000000000..a91587fbf5c2
--- /dev/null
+++ b/arch/i386/xen/smp.c
@@ -0,0 +1,407 @@
+/*
+ * Xen SMP support
+ *
+ * This file implements the Xen versions of smp_ops.  SMP under Xen is
+ * very straightforward.  Bringing a CPU up is simply a matter of
+ * loading its initial context and setting it running.
+ *
+ * IPIs are handled through the Xen event mechanism.
+ *
+ * Because virtual CPUs can be scheduled onto any real CPU, there's no
+ * useful topology information for the kernel to make use of.  As a
+ * result, all CPUs are treated as if they're single-core and
+ * single-threaded.
+ *
+ * This does not handle HOTPLUG_CPU yet.
+ */
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/cpu.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/page.h>
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+
+static cpumask_t cpu_initialized_map;
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+	void (*func) (void *info);
+	void *info;
+	atomic_t started;
+	atomic_t finished;
+	int wait;
+};
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
+
+static struct call_data_struct *call_data;
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+{
+	return IRQ_HANDLED;
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	int cpu = smp_processor_id();
+
+	cpu_init();
+
+	preempt_disable();
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+
+	xen_setup_cpu_clockevents();
+
+	/* We can take interrupts now: we're officially "up". */
+	local_irq_enable();
+
+	wmb();			/* make sure everything is out */
+	cpu_idle();
+}
+
+static int xen_smp_intr_init(unsigned int cpu)
+{
+	int rc;
+	const char *resched_name, *callfunc_name;
+
+	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+
+	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+				    cpu,
+				    xen_reschedule_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    resched_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(resched_irq, cpu) = rc;
+
+	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+				    cpu,
+				    xen_call_function_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfunc_irq, cpu) = rc;
+
+	return 0;
+
+ fail:
+	if (per_cpu(resched_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	if (per_cpu(callfunc_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	return rc;
+}
+
+void __init xen_fill_possible_map(void)
+{
+	int i, rc;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+		if (rc >= 0)
+			cpu_set(i, cpu_possible_map);
+	}
+}
+
+void __init xen_smp_prepare_boot_cpu(void)
+{
+	int cpu;
+
+	BUG_ON(smp_processor_id() != 0);
+	native_smp_prepare_boot_cpu();
+
+	xen_vcpu_setup(0);
+
+	/* We've switched to the "real" per-cpu gdt, so make sure the
+	   old memory can be recycled */
+	make_lowmem_page_readwrite(&per_cpu__gdt_page);
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(cpu_core_map[cpu]);
+	}
+}
+
+void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(cpu_core_map[cpu]);
+	}
+
+	smp_store_cpu_info(0);
+	set_cpu_sibling_map(0);
+
+	if (xen_smp_intr_init(0))
+		BUG();
+
+	cpu_initialized_map = cpumask_of_cpu(0);
+
+	/* Restrict the possible_map according to max_cpus. */
+	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+			continue;
+		cpu_clear(cpu, cpu_possible_map);
+	}
+
+	for_each_possible_cpu (cpu) {
+		struct task_struct *idle;
+
+		if (cpu == 0)
+			continue;
+
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle))
+			panic("failed fork for CPU %d", cpu);
+
+		cpu_set(cpu, cpu_present_map);
+	}
+
+	//init_xenbus_allowed_cpumask();
+}
+
+static __cpuinit int
+cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+{
+	struct vcpu_guest_context *ctxt;
+	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+
+	if (cpu_test_and_set(cpu, cpu_initialized_map))
+		return 0;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (ctxt == NULL)
+		return -ENOMEM;
+
+	ctxt->flags = VGCF_IN_KERNEL;
+	ctxt->user_regs.ds = __USER_DS;
+	ctxt->user_regs.es = __USER_DS;
+	ctxt->user_regs.fs = __KERNEL_PERCPU;
+	ctxt->user_regs.gs = 0;
+	ctxt->user_regs.ss = __KERNEL_DS;
+	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+
+	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+
+	xen_copy_trap_info(ctxt->trap_ctxt);
+
+	ctxt->ldt_ents = 0;
+
+	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
+	make_lowmem_page_readonly(gdt->gdt);
+
+	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
+	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
+
+	ctxt->user_regs.cs = __KERNEL_CS;
+	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+
+	ctxt->kernel_ss = __KERNEL_DS;
+	ctxt->kernel_sp = idle->thread.esp0;
+
+	ctxt->event_callback_cs     = __KERNEL_CS;
+	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
+	ctxt->failsafe_callback_cs  = __KERNEL_CS;
+	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+
+	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
+		BUG();
+
+	kfree(ctxt);
+	return 0;
+}
+
+int __cpuinit xen_cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle = idle_task(cpu);
+	int rc;
+
+#if 0
+	rc = cpu_up_check(cpu);
+	if (rc)
+		return rc;
+#endif
+
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	xen_vcpu_setup(cpu);
+	irq_ctx_init(cpu);
+	xen_setup_timer(cpu);
+
+	/* make sure interrupts start blocked */
+	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
+
+	rc = cpu_initialize_context(cpu, idle);
+	if (rc)
+		return rc;
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(1);
+
+	rc = xen_smp_intr_init(cpu);
+	if (rc)
+		return rc;
+
+	smp_store_cpu_info(cpu);
+	set_cpu_sibling_map(cpu);
+	/* This must be done before setting cpu_online_map */
+	wmb();
+
+	cpu_set(cpu, cpu_online_map);
+
+	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+	BUG_ON(rc);
+
+	return 0;
+}
+
+void xen_smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_self(void *v)
+{
+	int cpu = smp_processor_id();
+
+	/* make sure we're not pinning something down */
+	load_cr3(swapper_pg_dir);
+	/* should set up a minimal gdt */
+
+	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
+	BUG();
+}
+
+void xen_smp_send_stop(void)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
+	xen_smp_call_function_mask(mask, stop_self, NULL, 0);
+}
+
+void xen_smp_send_reschedule(int cpu)
+{
+	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
+}
+
+
+static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+{
+	unsigned cpu;
+
+	cpus_and(mask, mask, cpu_online_map);
+
+	for_each_cpu_mask(cpu, mask)
+		xen_send_IPI_one(cpu, vector);
+}
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+	int wait = call_data->wait;
+
+	/*
+	 * Notify initiating CPU that I've grabbed the data and am
+	 * about to execute the function
+	 */
+	mb();
+	atomic_inc(&call_data->started);
+	/*
+	 * At this point the info structure may be out of scope unless wait==1
+	 */
+	irq_enter();
+	(*func)(info);
+	irq_exit();
+
+	if (wait) {
+		mb();		/* commit everything before setting finished */
+		atomic_inc(&call_data->finished);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+			       void *info, int wait)
+{
+	struct call_data_struct data;
+	int cpus;
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
+	cpu_clear(smp_processor_id(), mask);
+
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();			/* write everything before IPI */
+
+	/* Send a message to other CPUs and wait for them to respond */
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run.
+	   XXX too severe?  Maybe we should check the other CPU's states? */
+	HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus ||
+	       (wait && atomic_read(&data.finished) != cpus))
+		cpu_relax();
+
+	spin_unlock(&call_lock);
+
+	return 0;
+}
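Note: xen_smp_call_function_mask and xen_call_function_interrupt above form a started/finished rendezvous around the shared call_data pointer: the initiator publishes func/info, and each responder bumps started before running the function and finished after. A user-space sketch of the same handshake, with one pthread standing in for an IPI'd CPU and C11 atomics in place of mb() (all names illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct demo_call_data {
		void (*func)(void *);
		void *info;
		atomic_int started;
		atomic_int finished;
		int wait;
	};

	static struct demo_call_data *call_data;

	static void say_hello(void *info)
	{
		printf("hello from %s\n", (const char *)info);
	}

	static void *responder(void *unused)
	{
		struct demo_call_data *d = call_data;

		(void)unused;
		atomic_fetch_add(&d->started, 1);	/* "I've grabbed func/info" */
		d->func(d->info);
		if (d->wait)
			atomic_fetch_add(&d->finished, 1);
		return NULL;
	}

	int main(void)
	{
		struct demo_call_data data = { say_hello, "the responder", 0, 0, 1 };
		pthread_t t;

		call_data = &data;		/* publish, then "send the IPI" */
		pthread_create(&t, NULL, responder, NULL);

		/* spin exactly like the initiator does in the file above */
		while (atomic_load(&data.started) != 1 ||
		       (data.wait && atomic_load(&data.finished) != 1))
			;

		pthread_join(t, NULL);
		return 0;
	}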
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
index 2aab44bec2a5..aeb04cf5dbf1 100644
--- a/arch/i386/xen/time.c
+++ b/arch/i386/xen/time.c
@@ -519,7 +519,7 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 	return ret;
 }
 
-static void xen_setup_timer(int cpu)
+void xen_setup_timer(int cpu)
 {
 	const char *name;
 	struct clock_event_device *evt;
@@ -535,16 +535,20 @@ static void xen_setup_timer(int cpu)
 				   IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
 				   name, NULL);
 
-	evt = &get_cpu_var(xen_clock_events);
+	evt = &per_cpu(xen_clock_events, cpu);
 	memcpy(evt, xen_clockevent, sizeof(*evt));
 
 	evt->cpumask = cpumask_of_cpu(cpu);
 	evt->irq = irq;
-	clockevents_register_device(evt);
 
 	setup_runstate_info(cpu);
+}
+
+void xen_setup_cpu_clockevents(void)
+{
+	BUG_ON(preemptible());
 
-	put_cpu_var(xen_clock_events);
+	clockevents_register_device(&__get_cpu_var(xen_clock_events));
 }
 
 __init void xen_time_init(void)
@@ -570,4 +574,5 @@ __init void xen_time_init(void)
 	tsc_disable = 0;
 
 	xen_setup_timer(cpu);
+	xen_setup_cpu_clockevents();
 }
diff --git a/arch/i386/xen/xen-ops.h b/arch/i386/xen/xen-ops.h
index 7667abd390ec..4069be8ba31f 100644
--- a/arch/i386/xen/xen-ops.h
+++ b/arch/i386/xen/xen-ops.h
@@ -3,6 +3,12 @@
 
 #include <linux/init.h>
 
+/* These are code, but not functions.  Defined in entry.S */
+extern const char xen_hypervisor_callback[];
+extern const char xen_failsafe_callback[];
+
+void xen_copy_trap_info(struct trap_info *traps);
+
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 
@@ -13,6 +19,8 @@ char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void __init xen_init_IRQ(void);
 
+void xen_setup_timer(int cpu);
+void xen_setup_cpu_clockevents(void);
 unsigned long xen_cpu_khz(void);
 void __init xen_time_init(void);
 unsigned long xen_get_wallclock(void);
@@ -28,5 +36,22 @@ static inline unsigned xen_get_lazy_mode(void)
 	return x86_read_percpu(xen_lazy_mode);
 }
 
+void __init xen_fill_possible_map(void);
+
+void xen_vcpu_setup(int cpu);
+void xen_smp_prepare_boot_cpu(void);
+void xen_smp_prepare_cpus(unsigned int max_cpus);
+int xen_cpu_up(unsigned int cpu);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
+void xen_smp_send_stop(void);
+void xen_smp_send_reschedule(int cpu);
+int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			   int wait);
+int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+				 int nonatomic, int wait);
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+			       void *info, int wait);
 
 #endif /* XEN_OPS_H */