Diffstat:
-rw-r--r--  arch/x86/kernel/paravirt.c          |   1
-rw-r--r--  arch/x86/xen/Kconfig                |  12
-rw-r--r--  arch/x86/xen/Makefile               |   2
-rw-r--r--  arch/x86/xen/enlighten.c            | 163
-rw-r--r--  arch/x86/xen/manage.c               | 143
-rw-r--r--  arch/x86/xen/mmu.c                  | 227
-rw-r--r--  arch/x86/xen/mmu.h                  |  32
-rw-r--r--  arch/x86/xen/setup.c                |   5
-rw-r--r--  arch/x86/xen/smp.c                  |   8
-rw-r--r--  arch/x86/xen/suspend.c              |  45
-rw-r--r--  arch/x86/xen/time.c                 |  13
-rw-r--r--  arch/x86/xen/xen-head.S             |   9
-rw-r--r--  arch/x86/xen/xen-ops.h              |  11
-rw-r--r--  drivers/char/hvc_xen.c              |  61
-rw-r--r--  drivers/input/xen-kbdfront.c        |  20
-rw-r--r--  drivers/lguest/lg.h                 |   1
-rw-r--r--  drivers/video/xen-fbfront.c         | 211
-rw-r--r--  drivers/xen/Makefile                |   2
-rw-r--r--  drivers/xen/balloon.c               |  10
-rw-r--r--  drivers/xen/events.c                | 114
-rw-r--r--  drivers/xen/grant-table.c           |   4
-rw-r--r--  drivers/xen/manage.c                | 252
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c   |  23
-rw-r--r--  include/asm-x86/page.h              |   1
-rw-r--r--  include/asm-x86/paravirt.h          |  15
-rw-r--r--  include/asm-x86/pgtable.h           |  16
-rw-r--r--  include/asm-x86/xen/hypercall.h     |  11
-rw-r--r--  include/asm-x86/xen/page.h          |  29
-rw-r--r--  include/linux/console.h             |   2
-rw-r--r--  include/linux/page-flags.h          |   1
-rw-r--r--  include/xen/events.h                |   4
-rw-r--r--  include/xen/grant_table.h           |   3
-rw-r--r--  include/xen/hvc-console.h           |   9
-rw-r--r--  include/xen/interface/elfnote.h     |  20
-rw-r--r--  include/xen/interface/io/fbif.h     |  29
-rw-r--r--  include/xen/interface/io/kbdif.h    |   2
-rw-r--r--  include/xen/interface/memory.h      |  12
-rw-r--r--  include/xen/xen-ops.h               |   6
-rw-r--r--  kernel/printk.c                     |   3
39 files changed, 1190 insertions, 342 deletions
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 74f0c5ea2a03..c98d54688180 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -403,6 +403,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 #endif /* PAGETABLE_LEVELS >= 3 */
 
        .pte_val = native_pte_val,
+       .pte_flags = native_pte_val,
        .pgd_val = native_pgd_val,
 
        .make_pte = native_make_pte,
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 2e641be2737e..d0f1c7c5dc3d 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -6,8 +6,18 @@ config XEN
        bool "Xen guest support"
        select PARAVIRT
        depends on X86_32
-       depends on X86_CMPXCHG && X86_TSC && !(X86_VISWS || X86_VOYAGER)
+       depends on X86_CMPXCHG && X86_TSC && X86_PAE && !(X86_VISWS || X86_VOYAGER)
        help
          This is the Linux Xen port. Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
          Xen hypervisor.
+
+config XEN_MAX_DOMAIN_MEMORY
+       int "Maximum allowed size of a domain in gigabytes"
+       default 8
+       depends on XEN
+       help
+         The pseudo-physical to machine address array is sized
+         according to the maximum possible memory size of a Xen
+         domain. This array uses 1 page per gigabyte, so there's no
+         need to be too stingy here.
\ No newline at end of file
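
[Note] The help text above ties CONFIG_XEN_MAX_DOMAIN_MEMORY to the size of the pseudo-physical-to-machine (p2m) array introduced by the mmu.c changes later in this patch. Below is a rough, self-contained sketch of the sizing arithmetic, assuming 4 KiB pages and 4-byte p2m entries on 32-bit x86; deriving a MAX_DOMAIN_PAGES-style constant directly from the Kconfig value is an assumption here, not something shown in the patch.

#include <stdio.h>

/* Assumed stand-ins for the kernel-side constants. */
#define PAGE_SIZE_BYTES  4096ULL
#define P2M_ENTRY_BYTES  4ULL      /* sizeof(unsigned long) on 32-bit */
#define MAX_DOMAIN_GIB   8ULL      /* CONFIG_XEN_MAX_DOMAIN_MEMORY default */

int main(void)
{
        /* Assumed derivation: how many 4 KiB pages the configured limit covers. */
        unsigned long long max_domain_pages =
                MAX_DOMAIN_GIB * 1024 * 1024 * 1024 / PAGE_SIZE_BYTES;

        unsigned long long entries_per_page = PAGE_SIZE_BYTES / P2M_ENTRY_BYTES;
        unsigned long long top_entries = max_domain_pages / entries_per_page;

        printf("max_domain_pages = %llu\n", max_domain_pages);
        printf("p2m leaf pages (== top-level entries) = %llu\n", top_entries);
        return 0;
}

With the default of 8 this works out to 2,097,152 trackable pages and 2,048 top-level entries, which is the bound that setup.c's min(MAX_DOMAIN_PAGES, max_pfn) clamp later in the patch enforces.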
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3d8df981d5fd..2ba2d1649131 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,4 @@
 obj-y          := enlighten.o setup.o multicalls.o mmu.o \
-                       time.o manage.o xen-asm.o grant-table.o
+                       time.o xen-asm.o grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)      += smp.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8a56e457d61..8e6152e6ed88 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,13 +75,13 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
-static /* __initdata */ struct shared_info dummy_shared_info;
+struct shared_info xen_dummy_shared_info;
 
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
  */
-struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 /*
  * Flag to determine whether vcpu info placement is available on all
@@ -98,13 +98,13 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
  */
 static int have_vcpu_info_placement = 1;
 
-static void __init xen_vcpu_setup(int cpu)
+static void xen_vcpu_setup(int cpu)
 {
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;
 
-       BUG_ON(HYPERVISOR_shared_info == &dummy_shared_info);
+       BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
        if (!have_vcpu_info_placement)
@@ -136,6 +136,34 @@ static void __init xen_vcpu_setup(int cpu)
        }
 }
 
+/*
+ * On restore, set the vcpu placement up again.
+ * If it fails, then we're in a bad state, since
+ * we can't back out from using it...
+ */
+void xen_vcpu_restore(void)
+{
+       if (have_vcpu_info_placement) {
+               int cpu;
+
+               for_each_online_cpu(cpu) {
+                       bool other_cpu = (cpu != smp_processor_id());
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+                               BUG();
+
+                       xen_vcpu_setup(cpu);
+
+                       if (other_cpu &&
+                           HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+                               BUG();
+               }
+
+               BUG_ON(!have_vcpu_info_placement);
+       }
+}
+
 static void __init xen_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -235,13 +263,13 @@ static void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
-       /* There's a one instruction preempt window here. We need to
-          make sure we're don't switch CPUs between getting the vcpu
-          pointer and updating the mask. */
-       preempt_disable();
+       /* We don't need to worry about being preempted here, since
+          either a) interrupts are disabled, so no preemption, or b)
+          the caller is confused and is trying to re-enable interrupts
+          on an indeterminate processor. */
+
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
-       preempt_enable_no_resched();
 
        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */
@@ -254,7 +282,7 @@ static void xen_irq_enable(void)
 static void xen_safe_halt(void)
 {
        /* Blocking includes an implicit local_irq_enable(). */
-       if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
+       if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
                BUG();
 }
 
@@ -607,6 +635,30 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
+static void xen_clts(void)
+{
+       struct multicall_space mcs;
+
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_write_cr0(unsigned long cr0)
+{
+       struct multicall_space mcs;
+
+       /* Only pay attention to cr0.TS; everything else is
+          ignored. */
+       mcs = xen_mc_entry(0);
+
+       MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
 static void xen_write_cr2(unsigned long cr2)
 {
        x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
@@ -624,8 +676,10 @@ static unsigned long xen_read_cr2_direct(void)
 
 static void xen_write_cr4(unsigned long cr4)
 {
-       /* Just ignore cr4 changes; Xen doesn't allow us to do
-          anything anyway. */
+       cr4 &= ~X86_CR4_PGE;
+       cr4 &= ~X86_CR4_PSE;
+
+       native_write_cr4(cr4);
 }
 
 static unsigned long xen_read_cr3(void)
@@ -785,38 +839,35 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+       int i;
 
        /* special set_pte for pagetable initialization */
        pv_mmu_ops.set_pte = xen_set_pte_init;
 
        init_mm.pgd = base;
        /*
-        * copy top-level of Xen-supplied pagetable into place. For
-        * !PAE we can use this as-is, but for PAE it is a stand-in
-        * while we copy the pmd pages.
+        * copy top-level of Xen-supplied pagetable into place. This
+        * is a stand-in while we copy the pmd pages.
         */
        memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
-       if (PTRS_PER_PMD > 1) {
-               int i;
-               /*
-                * For PAE, need to allocate new pmds, rather than
-                * share Xen's, since Xen doesn't like pmd's being
-                * shared between address spaces.
-                */
-               for (i = 0; i < PTRS_PER_PGD; i++) {
-                       if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
+       /*
+        * For PAE, need to allocate new pmds, rather than
+        * share Xen's, since Xen doesn't like pmd's being
+        * shared between address spaces.
+        */
+       for (i = 0; i < PTRS_PER_PGD; i++) {
+               if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
                        pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
                        memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
                               PAGE_SIZE);
 
                        make_lowmem_page_readonly(pmd);
 
                        set_pgd(&base[i], __pgd(1 + __pa(pmd)));
                } else
                        pgd_clear(&base[i]);
-               }
        }
 
        /* make sure zero_page is mapped RO so we can use it in pagetables */
@@ -834,7 +885,7 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
                  PFN_DOWN(__pa(xen_start_info->pt_base)));
 }
 
-static __init void setup_shared_info(void)
+void xen_setup_shared_info(void)
 {
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned long addr = fix_to_virt(FIX_PARAVIRT_BOOTMAP);
@@ -857,6 +908,8 @@ static __init void setup_shared_info(void)
        /* In UP this is as good a place as any to set up shared info */
        xen_setup_vcpu_info_placement();
 #endif
+
+       xen_setup_mfn_list_list();
 }
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -869,25 +922,23 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
        pv_mmu_ops.release_pmd = xen_release_pmd;
        pv_mmu_ops.set_pte = xen_set_pte;
 
-       setup_shared_info();
+       xen_setup_shared_info();
 
        /* Actually pin the pagetable down, but we can't set PG_pinned
           yet because the page structures don't exist yet. */
-       {
-               unsigned level;
-
-#ifdef CONFIG_X86_PAE
-               level = MMUEXT_PIN_L3_TABLE;
-#else
-               level = MMUEXT_PIN_L2_TABLE;
-#endif
-
-               pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
-       }
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
+}
+
+static __init void xen_post_allocator_init(void)
+{
+       pv_mmu_ops.set_pmd = xen_set_pmd;
+       pv_mmu_ops.set_pud = xen_set_pud;
+
+       xen_mark_init_mm_pinned();
 }
 
 /* This is called once we have the cpu_possible_map */
-void __init xen_setup_vcpu_info_placement(void)
+void xen_setup_vcpu_info_placement(void)
 {
        int cpu;
 
@@ -973,7 +1024,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
        .banner = xen_banner,
        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,
-       .post_allocator_init = xen_mark_init_mm_pinned,
+       .post_allocator_init = xen_post_allocator_init,
 };
 
 static const struct pv_time_ops xen_time_ops __initdata = {
@@ -991,10 +1042,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,
 
-       .clts = native_clts,
+       .clts = xen_clts,
 
        .read_cr0 = native_read_cr0,
-       .write_cr0 = native_write_cr0,
+       .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
@@ -1085,24 +1136,23 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
        .set_pte = NULL,        /* see xen_pagetable_setup_* */
        .set_pte_at = xen_set_pte_at,
-       .set_pmd = xen_set_pmd,
+       .set_pmd = xen_set_pmd_hyper,
 
        .pte_val = xen_pte_val,
+       .pte_flags = native_pte_val,
        .pgd_val = xen_pgd_val,
 
        .make_pte = xen_make_pte,
        .make_pgd = xen_make_pgd,
 
-#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .set_pte_present = xen_set_pte_at,
-       .set_pud = xen_set_pud,
+       .set_pud = xen_set_pud_hyper,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
 
        .make_pmd = xen_make_pmd,
        .pmd_val = xen_pmd_val,
-#endif /* PAE */
 
        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
@@ -1129,11 +1179,13 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
 static void xen_reboot(int reason)
 {
+       struct sched_shutdown r = { .reason = reason };
+
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
 
-       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
 }
 
@@ -1207,7 +1259,7 @@ asmlinkage void __init xen_start_kernel(void)
 
        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
-               phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
+               xen_build_dynamic_phys_to_machine();
 
        pgd = (pgd_t *)xen_start_info->pt_base;
 
@@ -1242,8 +1294,11 @@ asmlinkage void __init xen_start_kernel(void)
                ? __pa(xen_start_info->mod_start) : 0;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
 
-       if (!is_initial_xendomain())
+       if (!is_initial_xendomain()) {
+               add_preferred_console("xenboot", 0, NULL);
+               add_preferred_console("tty", 0, NULL);
                add_preferred_console("hvc", 0, NULL);
+       }
 
        /* Start the world */
        start_kernel();
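
[Note] xen_clts() and xen_write_cr0() above do not trap to the hypervisor directly; like the *_hyper pagetable setters added in mmu.c, they queue a MULTI_fpu_taskswitch request with xen_mc_entry() and let xen_mc_issue() flush it, so consecutive operations inside a lazy-CPU section can go out as one batch. A self-contained model of that reserve/fill/issue pattern follows; it is a sketch, not the kernel's multicalls.c, whose per-CPU buffering and flush details are omitted.

#include <stdio.h>

struct call { int op; unsigned long arg; };

#define BATCH_MAX 8
static struct call batch[BATCH_MAX];
static int batch_len;

static void flush(void)
{
        if (batch_len)
                printf("issuing %d queued calls in one hypercall\n", batch_len);
        batch_len = 0;
}

/* Reserve a slot, flushing first if the buffer is full. */
static struct call *mc_entry(void)
{
        if (batch_len == BATCH_MAX)
                flush();
        return &batch[batch_len++];
}

/* In lazy mode keep queueing; otherwise push the batch out now. */
static void mc_issue(int lazy)
{
        if (!lazy)
                flush();
}

int main(void)
{
        struct call *c = mc_entry();
        c->op = 1;      /* e.g. an fpu_taskswitch-style request */
        c->arg = 0;
        mc_issue(0);    /* not in a lazy section: goes out immediately */
        return 0;
}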
diff --git a/arch/x86/xen/manage.c b/arch/x86/xen/manage.c
deleted file mode 100644
index aa7af9e6abc0..000000000000
--- a/arch/x86/xen/manage.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Handle extern requests for shutdown, reboot and sysrq
- */
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/reboot.h>
-#include <linux/sysrq.h>
-
-#include <xen/xenbus.h>
-
-#define SHUTDOWN_INVALID  -1
-#define SHUTDOWN_POWEROFF 0
-#define SHUTDOWN_SUSPEND  2
-/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
- * report a crash, not be instructed to crash!
- * HALT is the same as POWEROFF, as far as we're concerned. The tools use
- * the distinction when we return the reason code to them.
- */
-#define SHUTDOWN_HALT     4
-
-/* Ignore multiple shutdown requests. */
-static int shutting_down = SHUTDOWN_INVALID;
-
-static void shutdown_handler(struct xenbus_watch *watch,
-                            const char **vec, unsigned int len)
-{
-       char *str;
-       struct xenbus_transaction xbt;
-       int err;
-
-       if (shutting_down != SHUTDOWN_INVALID)
-               return;
-
- again:
-       err = xenbus_transaction_start(&xbt);
-       if (err)
-               return;
-
-       str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
-       /* Ignore read errors and empty reads. */
-       if (XENBUS_IS_ERR_READ(str)) {
-               xenbus_transaction_end(xbt, 1);
-               return;
-       }
-
-       xenbus_write(xbt, "control", "shutdown", "");
-
-       err = xenbus_transaction_end(xbt, 0);
-       if (err == -EAGAIN) {
-               kfree(str);
-               goto again;
-       }
-
-       if (strcmp(str, "poweroff") == 0 ||
-           strcmp(str, "halt") == 0)
-               orderly_poweroff(false);
-       else if (strcmp(str, "reboot") == 0)
-               ctrl_alt_del();
-       else {
-               printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
-               shutting_down = SHUTDOWN_INVALID;
-       }
-
-       kfree(str);
-}
-
-static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
-                         unsigned int len)
-{
-       char sysrq_key = '\0';
-       struct xenbus_transaction xbt;
-       int err;
-
- again:
-       err = xenbus_transaction_start(&xbt);
-       if (err)
-               return;
-       if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
-               printk(KERN_ERR "Unable to read sysrq code in "
-                      "control/sysrq\n");
-               xenbus_transaction_end(xbt, 1);
-               return;
-       }
-
-       if (sysrq_key != '\0')
-               xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
-
-       err = xenbus_transaction_end(xbt, 0);
-       if (err == -EAGAIN)
-               goto again;
-
-       if (sysrq_key != '\0')
-               handle_sysrq(sysrq_key, NULL);
-}
-
-static struct xenbus_watch shutdown_watch = {
-       .node = "control/shutdown",
-       .callback = shutdown_handler
-};
-
-static struct xenbus_watch sysrq_watch = {
-       .node = "control/sysrq",
-       .callback = sysrq_handler
-};
-
-static int setup_shutdown_watcher(void)
-{
-       int err;
-
-       err = register_xenbus_watch(&shutdown_watch);
-       if (err) {
-               printk(KERN_ERR "Failed to set shutdown watcher\n");
-               return err;
-       }
-
-       err = register_xenbus_watch(&sysrq_watch);
-       if (err) {
-               printk(KERN_ERR "Failed to set sysrq watcher\n");
-               return err;
-       }
-
-       return 0;
-}
-
-static int shutdown_event(struct notifier_block *notifier,
-                         unsigned long event,
-                         void *data)
-{
-       setup_shutdown_watcher();
-       return NOTIFY_DONE;
-}
-
-static int __init setup_shutdown_event(void)
-{
-       static struct notifier_block xenstore_notifier = {
-               .notifier_call = shutdown_event
-       };
-       register_xenstore_notifier(&xenstore_notifier);
-
-       return 0;
-}
-
-subsys_initcall(setup_shutdown_event);
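
[Note] This file is removed here; per the diffstat, the shutdown/sysrq watch handling reappears, extended with suspend support, as drivers/xen/manage.c. Both handlers above use the same xenbus transaction idiom: start a transaction, read and clear the control node, commit, and redo everything if the commit returns -EAGAIN. The sketch below distills that idiom into one helper using only the calls that appear in the deleted code; it is not a standalone compile unit, and the helper name is illustrative.

/* Read and acknowledge a control node, retrying on transaction conflicts. */
static char *read_and_clear(const char *dir, const char *node)
{
        struct xenbus_transaction xbt;
        char *str;
        int err;

 again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                return NULL;

        str = (char *)xenbus_read(xbt, dir, node, NULL);
        if (XENBUS_IS_ERR_READ(str)) {
                xenbus_transaction_end(xbt, 1);         /* abort */
                return NULL;
        }

        xenbus_write(xbt, dir, node, "");               /* clear the request */

        err = xenbus_transaction_end(xbt, 0);           /* commit */
        if (err == -EAGAIN) {                           /* raced with another writer */
                kfree(str);
                goto again;
        }

        return str;     /* caller kfree()s the result */
}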
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3525ef523a74..7c9935858f92 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -56,6 +56,131 @@
 #include "multicalls.h"
 #include "mmu.h"
 
+#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
+#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+
+/* Placeholder for holes in the address space */
+static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
+       __attribute__((section(".data.page_aligned"))) =
+               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+
+ /* Array of pointers to pages containing p2m entries */
+static unsigned long *p2m_top[TOP_ENTRIES]
+       __attribute__((section(".data.page_aligned"))) =
+               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+
+/* Arrays of p2m arrays expressed in mfns used for save/restore */
+static unsigned long p2m_top_mfn[TOP_ENTRIES]
+       __attribute__((section(".bss.page_aligned")));
+
+static unsigned long p2m_top_mfn_list[
+               PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
+       __attribute__((section(".bss.page_aligned")));
+
+static inline unsigned p2m_top_index(unsigned long pfn)
+{
+       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
+       return pfn / P2M_ENTRIES_PER_PAGE;
+}
+
+static inline unsigned p2m_index(unsigned long pfn)
+{
+       return pfn % P2M_ENTRIES_PER_PAGE;
+}
+
+/* Build the parallel p2m_top_mfn structures */
+void xen_setup_mfn_list_list(void)
+{
+       unsigned pfn, idx;
+
+       for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+
+               p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+       }
+
+       for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
+               unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
+               p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+       }
+
+       BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+       HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+               virt_to_mfn(p2m_top_mfn_list);
+       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+}
+
+/* Set up p2m_top to point to the domain-builder provided p2m pages */
+void __init xen_build_dynamic_phys_to_machine(void)
+{
+       unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
+       unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
+       unsigned pfn;
+
+       for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+
+               p2m_top[topidx] = &mfn_list[pfn];
+       }
+}
+
+unsigned long get_phys_to_machine(unsigned long pfn)
+{
+       unsigned topidx, idx;
+
+       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+               return INVALID_P2M_ENTRY;
+
+       topidx = p2m_top_index(pfn);
+       idx = p2m_index(pfn);
+       return p2m_top[topidx][idx];
+}
+EXPORT_SYMBOL_GPL(get_phys_to_machine);
+
+static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
+{
+       unsigned long *p;
+       unsigned i;
+
+       p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
+       BUG_ON(p == NULL);
+
+       for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
+               p[i] = INVALID_P2M_ENTRY;
+
+       if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
+               free_page((unsigned long)p);
+       else
+               *mfnp = virt_to_mfn(p);
+}
+
+void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+       unsigned topidx, idx;
+
+       if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+               BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+               return;
+       }
+
+       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+               BUG_ON(mfn != INVALID_P2M_ENTRY);
+               return;
+       }
+
+       topidx = p2m_top_index(pfn);
+       if (p2m_top[topidx] == p2m_missing) {
+               /* no need to allocate a page to store an invalid entry */
+               if (mfn == INVALID_P2M_ENTRY)
+                       return;
+               alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
+       }
+
+       idx = p2m_index(pfn);
+       p2m_top[topidx][idx] = mfn;
+}
+
 xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
        unsigned int level;
@@ -98,7 +223,14 @@ void make_lowmem_page_readwrite(void *vaddr)
 }
 
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static bool page_pinned(void *ptr)
+{
+       struct page *page = virt_to_page(ptr);
+
+       return PagePinned(page);
+}
+
+void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
        struct multicall_space mcs;
        struct mmu_update *u;
@@ -116,6 +248,18 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
        preempt_enable();
 }
 
+void xen_set_pmd(pmd_t *ptr, pmd_t val)
+{
+       /* If page is not pinned, we can just update the entry
+          directly */
+       if (!page_pinned(ptr)) {
+               *ptr = val;
+               return;
+       }
+
+       xen_set_pmd_hyper(ptr, val);
+}
+
 /*
  * Associate a virtual page frame with a given physical page frame
  * and protection flags for that frame.
@@ -222,8 +366,8 @@ pmdval_t xen_pmd_val(pmd_t pmd)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
 }
-#ifdef CONFIG_X86_PAE
-void xen_set_pud(pud_t *ptr, pud_t val)
+
+void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
        struct multicall_space mcs;
        struct mmu_update *u;
@@ -241,6 +385,18 @@ void xen_set_pud(pud_t *ptr, pud_t val)
        preempt_enable();
 }
 
+void xen_set_pud(pud_t *ptr, pud_t val)
+{
+       /* If page is not pinned, we can just update the entry
+          directly */
+       if (!page_pinned(ptr)) {
+               *ptr = val;
+               return;
+       }
+
+       xen_set_pud_hyper(ptr, val);
+}
+
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
        ptep->pte_high = pte.pte_high;
@@ -262,7 +418,7 @@ void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 
 void xen_pmd_clear(pmd_t *pmdp)
 {
-       xen_set_pmd(pmdp, __pmd(0));
+       set_pmd(pmdp, __pmd(0));
 }
 
 pmd_t xen_make_pmd(pmdval_t pmd)
@@ -272,12 +428,6 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 
        return native_make_pmd(pmd);
 }
-#else  /* !PAE */
-void xen_set_pte(pte_t *ptep, pte_t pte)
-{
-       *ptep = pte;
-}
-#endif /* CONFIG_X86_PAE */
 
 /*
   (Yet another) pagetable walker. This one is intended for pinning a
@@ -430,8 +580,6 @@ static int pin_page(struct page *page, enum pt_level level)
    read-only, and can be pinned. */
 void xen_pgd_pin(pgd_t *pgd)
 {
-       unsigned level;
-
        xen_mc_batch();
 
        if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
@@ -441,15 +589,31 @@ void xen_pgd_pin(pgd_t *pgd)
                xen_mc_batch();
        }
 
-#ifdef CONFIG_X86_PAE
-       level = MMUEXT_PIN_L3_TABLE;
-#else
-       level = MMUEXT_PIN_L2_TABLE;
-#endif
+       xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
+       xen_mc_issue(0);
+}
 
-       xen_do_pin(level, PFN_DOWN(__pa(pgd)));
+/*
+ * On save, we need to pin all pagetables to make sure they get their
+ * mfns turned into pfns. Search the list for any unpinned pgds and pin
+ * them (unpinned pgds are not currently in use, probably because the
+ * process is under construction or destruction).
+ */
+void xen_mm_pin_all(void)
+{
+       unsigned long flags;
+       struct page *page;
 
-       xen_mc_issue(0);
+       spin_lock_irqsave(&pgd_lock, flags);
+
+       list_for_each_entry(page, &pgd_list, lru) {
+               if (!PagePinned(page)) {
+                       xen_pgd_pin((pgd_t *)page_address(page));
+                       SetPageSavePinned(page);
+               }
+       }
+
+       spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 /* The init_mm pagetable is really pinned as soon as its created, but
@@ -509,6 +673,29 @@ static void xen_pgd_unpin(pgd_t *pgd)
        xen_mc_issue(0);
 }
 
+/*
+ * On resume, undo any pinning done at save, so that the rest of the
+ * kernel doesn't see any unexpected pinned pagetables.
+ */
+void xen_mm_unpin_all(void)
+{
+       unsigned long flags;
+       struct page *page;
+
+       spin_lock_irqsave(&pgd_lock, flags);
+
+       list_for_each_entry(page, &pgd_list, lru) {
+               if (PageSavePinned(page)) {
+                       BUG_ON(!PagePinned(page));
+                       printk("unpinning pinned %p\n", page_address(page));
+                       xen_pgd_unpin((pgd_t *)page_address(page));
+                       ClearPageSavePinned(page);
+               }
+       }
+
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
        spin_lock(&next->page_table_lock);
@@ -602,7 +789,7 @@ void xen_exit_mmap(struct mm_struct *mm)
        spin_lock(&mm->page_table_lock);
 
        /* pgd may not be pinned in the error exit path of execve */
-       if (PagePinned(virt_to_page(mm->pgd)))
+       if (page_pinned(mm->pgd))
                xen_pgd_unpin(mm->pgd);
 
        spin_unlock(&mm->page_table_lock);
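
[Note] The p2m code added at the top of this file replaces the flat phys_to_machine_mapping array (removed from setup.c below) with a two-level structure: a fixed-size top-level array of pointers, each naming one page of machine frame numbers, with a single shared "missing" page standing in for unpopulated ranges so holes cost nothing. The following is a self-contained userspace model of that lookup/update split; the sizes are illustrative, and the save/restore mfn arrays and the cmpxchg race handling of the real code are left out.

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 1024UL                         /* PAGE_SIZE / 4-byte entry on 32-bit */
#define MAX_PAGES        (2048UL * ENTRIES_PER_PAGE)    /* illustrative domain size */
#define TOP_ENTRIES      (MAX_PAGES / ENTRIES_PER_PAGE)
#define INVALID_ENTRY    (~0UL)

/* One shared page of invalid entries stands in for every hole. */
static unsigned long p2m_missing[ENTRIES_PER_PAGE];
static unsigned long *p2m_top[TOP_ENTRIES];

static void p2m_init(void)
{
        unsigned long i;

        for (i = 0; i < ENTRIES_PER_PAGE; i++)
                p2m_missing[i] = INVALID_ENTRY;
        for (i = 0; i < TOP_ENTRIES; i++)
                p2m_top[i] = p2m_missing;
}

static unsigned long get_p2m(unsigned long pfn)
{
        if (pfn >= MAX_PAGES)
                return INVALID_ENTRY;
        return p2m_top[pfn / ENTRIES_PER_PAGE][pfn % ENTRIES_PER_PAGE];
}

static void set_p2m(unsigned long pfn, unsigned long mfn)
{
        unsigned long topidx = pfn / ENTRIES_PER_PAGE;
        unsigned long i;

        if (p2m_top[topidx] == p2m_missing) {
                if (mfn == INVALID_ENTRY)
                        return;         /* no leaf page needed to store a hole */

                unsigned long *leaf = malloc(ENTRIES_PER_PAGE * sizeof(*leaf));
                if (!leaf)
                        abort();
                for (i = 0; i < ENTRIES_PER_PAGE; i++)
                        leaf[i] = INVALID_ENTRY;
                p2m_top[topidx] = leaf; /* the kernel uses cmpxchg here */
        }
        p2m_top[topidx][pfn % ENTRIES_PER_PAGE] = mfn;
}

int main(void)
{
        p2m_init();
        set_p2m(12345, 0xabcde);
        printf("p2m(12345) = %#lx, p2m(99999) = %#lx\n",
               get_p2m(12345), get_p2m(99999));
        return 0;
}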
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index b5e189b1519d..e3dd09e25c63 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -25,10 +25,6 @@ enum pt_level {
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                   pte_t *ptep, pte_t pteval);
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
 void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
@@ -37,31 +33,23 @@ void xen_exit_mmap(struct mm_struct *mm);
 void xen_pgd_pin(pgd_t *pgd);
 //void xen_pgd_unpin(pgd_t *pgd);
 
-#ifdef CONFIG_X86_PAE
-unsigned long long xen_pte_val(pte_t);
-unsigned long long xen_pmd_val(pmd_t);
-unsigned long long xen_pgd_val(pgd_t);
+pteval_t xen_pte_val(pte_t);
+pmdval_t xen_pmd_val(pmd_t);
+pgdval_t xen_pgd_val(pgd_t);
 
-pte_t xen_make_pte(unsigned long long);
-pmd_t xen_make_pmd(unsigned long long);
-pgd_t xen_make_pgd(unsigned long long);
+pte_t xen_make_pte(pteval_t);
+pmd_t xen_make_pmd(pmdval_t);
+pgd_t xen_make_pgd(pgdval_t);
 
+void xen_set_pte(pte_t *ptep, pte_t pteval);
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval);
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
+void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
 void xen_set_pud(pud_t *ptr, pud_t val);
+void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
+void xen_set_pud_hyper(pud_t *ptr, pud_t val);
 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_pmd_clear(pmd_t *pmdp);
 
-
-#else
-unsigned long xen_pte_val(pte_t);
-unsigned long xen_pmd_val(pmd_t);
-unsigned long xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(unsigned long);
-pmd_t xen_make_pmd(unsigned long);
-pgd_t xen_make_pgd(unsigned long);
-#endif
-
 #endif /* _XEN_MMU_H */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 82517e4a752a..488447878a9d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -16,6 +16,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/page.h>
 #include <xen/interface/callback.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
@@ -27,8 +28,6 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
-unsigned long *phys_to_machine_mapping;
-EXPORT_SYMBOL(phys_to_machine_mapping);
 
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
@@ -38,6 +37,8 @@ char * __init xen_memory_setup(void)
 {
        unsigned long max_pfn = xen_start_info->nr_pages;
 
+       max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+
        e820.nr_map = 0;
        add_memory_region(0, LOWMEMSIZE(), E820_RAM);
        add_memory_region(HIGH_MEMORY, PFN_PHYS(max_pfn)-HIGH_MEMORY, E820_RAM);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..d2e3c20127d7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,7 +35,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static cpumask_t xen_cpu_initialized_map;
+cpumask_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, resched_irq) = -1;
 static DEFINE_PER_CPU(int, callfunc_irq) = -1;
 static DEFINE_PER_CPU(int, debug_irq) = -1;
@@ -65,6 +65,12 @@ static struct call_data_struct *call_data;
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
+#ifdef CONFIG_X86_32
+       __get_cpu_var(irq_stat).irq_resched_count++;
+#else
+       add_pda(irq_resched_count, 1);
+#endif
+
        return IRQ_HANDLED;
 }
 
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
new file mode 100644
index 000000000000..251669a932d4
--- /dev/null
+++ b/arch/x86/xen/suspend.c
@@ -0,0 +1,45 @@
+#include <linux/types.h>
+
+#include <xen/interface/xen.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+
+void xen_pre_suspend(void)
+{
+       xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
+       xen_start_info->console.domU.mfn =
+               mfn_to_pfn(xen_start_info->console.domU.mfn);
+
+       BUG_ON(!irqs_disabled());
+
+       HYPERVISOR_shared_info = &xen_dummy_shared_info;
+       if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
+                                        __pte_ma(0), 0))
+               BUG();
+}
+
+void xen_post_suspend(int suspend_cancelled)
+{
+       xen_setup_shared_info();
+
+       if (suspend_cancelled) {
+               xen_start_info->store_mfn =
+                       pfn_to_mfn(xen_start_info->store_mfn);
+               xen_start_info->console.domU.mfn =
+                       pfn_to_mfn(xen_start_info->console.domU.mfn);
+       } else {
+#ifdef CONFIG_SMP
+               xen_cpu_initialized_map = cpu_online_map;
+#endif
+               xen_vcpu_restore();
+               xen_timer_resume();
+       }
+
+}
+
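
[Note] xen_pre_suspend() and xen_post_suspend() are the arch-side bracket around the actual suspend operation, which per the diffstat is driven from drivers/xen/manage.c (not shown in this section). Frame numbers the toolstack needs are canonicalized to pfns and the shared-info mapping is torn down before suspending; afterwards the pfns are converted back to mfns only if the suspend was cancelled, otherwise vcpu placement and timers are rebuilt for the new machine. A control-flow sketch of that ordering, with stub functions standing in for the hypervisor side (the names below are illustrative, not the patch's):

#include <stdbool.h>
#include <stdio.h>

static void pre_suspend(void)
{
        puts("canonicalize store/console mfns to pfns, unmap shared info");
}

/* Stub for the suspend step issued by drivers/xen/manage.c;
 * returning true means the suspend was cancelled. */
static bool do_suspend(void)
{
        return false;
}

static void post_suspend(bool cancelled)
{
        puts("remap shared info");
        if (cancelled)
                puts("convert pfns back to mfns; nothing else changed");
        else
                puts("re-register vcpu info, resume timers");
}

int main(void)
{
        pre_suspend();
        post_suspend(do_suspend());
        return 0;
}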
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 52b2e3856980..0da249dbdc49 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -565,6 +565,19 @@ void xen_setup_cpu_clockevents(void)
 	clockevents_register_device(&__get_cpu_var(xen_clock_events));
 }
 
+void xen_timer_resume(void)
+{
+       int cpu;
+
+       if (xen_clockevent != &xen_vcpuop_clockevent)
+               return;
+
+       for_each_online_cpu(cpu) {
+               if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+                       BUG();
+       }
+}
+
 __init void xen_time_init(void)
 {
        int cpu = smp_processor_id();
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 288d587ce73c..ef6c9e005f90 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -7,6 +7,7 @@
 #include <linux/init.h>
 #include <asm/boot.h>
 #include <xen/interface/elfnote.h>
+#include <asm/xen/interface.h>
 
        __INIT
 ENTRY(startup_xen)
@@ -30,11 +31,11 @@ ENTRY(hypercall_page)
        ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long startup_xen)
        ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long hypercall_page)
        ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb")
-#ifdef CONFIG_X86_PAE
        ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
-#else
-       ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "no")
-#endif
        ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
+       ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
+               .quad _PAGE_PRESENT; .quad _PAGE_PRESENT)
+       ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
+       ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long __HYPERVISOR_VIRT_START)
 
 #endif /*CONFIG_XEN */
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index f1063ae08037..9a055592a307 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -9,18 +9,26 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
+struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
 extern struct start_info *xen_start_info;
+extern struct shared_info xen_dummy_shared_info;
 extern struct shared_info *HYPERVISOR_shared_info;
 
+void xen_setup_mfn_list_list(void);
+void xen_setup_shared_info(void);
+
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void __init xen_init_IRQ(void);
 void xen_enable_sysenter(void);
+void xen_vcpu_restore(void);
+
+void __init xen_build_dynamic_phys_to_machine(void);
 
 void xen_setup_timer(int cpu);
 void xen_setup_cpu_clockevents(void);
@@ -29,6 +37,7 @@ void __init xen_time_init(void)
 unsigned long xen_get_wallclock(void);
 int xen_set_wallclock(unsigned long time);
 unsigned long long xen_sched_clock(void);
+void xen_timer_resume(void);
 
 irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
 
@@ -54,6 +63,8 @@ int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                               void *info, int wait);
 
+extern cpumask_t xen_cpu_initialized_map;
+
 
 /* Declare an asm function, along with symbols needed to make it
    inlineable */
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index dd68f8541c2d..db2ae4216279 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -39,9 +39,14 @@ static int xencons_irq;
 
 /* ------------------------------------------------------------------ */
 
+static unsigned long console_pfn = ~0ul;
+
 static inline struct xencons_interface *xencons_interface(void)
 {
-       return mfn_to_virt(xen_start_info->console.domU.mfn);
+       if (console_pfn == ~0ul)
+               return mfn_to_virt(xen_start_info->console.domU.mfn);
+       else
+               return __va(console_pfn << PAGE_SHIFT);
 }
 
 static inline void notify_daemon(void)
@@ -101,20 +106,32 @@ static int __init xen_init(void)
 {
        struct hvc_struct *hp;
 
-       if (!is_running_on_xen())
-               return 0;
+       if (!is_running_on_xen() ||
+           is_initial_xendomain() ||
+           !xen_start_info->console.domU.evtchn)
+               return -ENODEV;
 
        xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
        if (xencons_irq < 0)
-               xencons_irq = 0 /* NO_IRQ */;
+               xencons_irq = 0; /* NO_IRQ */
+
        hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
        if (IS_ERR(hp))
                return PTR_ERR(hp);
 
        hvc = hp;
+
+       console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);
+
        return 0;
 }
 
+void xen_console_resume(void)
+{
+       if (xencons_irq)
+               rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
+}
+
 static void __exit xen_fini(void)
 {
        if (hvc)
@@ -134,12 +151,28 @@ module_init(xen_init);
 module_exit(xen_fini);
 console_initcall(xen_cons_init);
 
+static void raw_console_write(const char *str, int len)
+{
+       while(len > 0) {
+               int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
+               if (rc <= 0)
+                       break;
+
+               str += rc;
+               len -= rc;
+       }
+}
+
+#ifdef CONFIG_EARLY_PRINTK
 static void xenboot_write_console(struct console *console, const char *string,
                                  unsigned len)
 {
        unsigned int linelen, off = 0;
        const char *pos;
 
+       raw_console_write(string, len);
+
+       write_console(0, "(early) ", 8);
        while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
                linelen = pos-string+off;
                if (off + linelen > len)
@@ -155,5 +188,23 @@ static void xenboot_write_console(struct console *console, const char *string,
 struct console xenboot_console = {
        .name = "xenboot",
        .write = xenboot_write_console,
-       .flags = CON_PRINTBUFFER | CON_BOOT,
+       .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
 };
+#endif /* CONFIG_EARLY_PRINTK */
+
+void xen_raw_console_write(const char *str)
+{
+       raw_console_write(str, strlen(str));
+}
+
+void xen_raw_printk(const char *fmt, ...)
+{
+       static char buf[512];
+       va_list ap;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       xen_raw_console_write(buf);
+}
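
[Note] raw_console_write() above loops because HYPERVISOR_console_io() may accept only part of the buffer, so the remainder is resubmitted from wherever the hypervisor stopped. The same consume-what-was-accepted loop, modelled on POSIX write(2) so it can be built and run on its own:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Keep writing until the whole buffer has been accepted, advancing past
 * whatever each call consumed; give up on error, like the original. */
static void write_all(int fd, const char *str, size_t len)
{
        while (len > 0) {
                ssize_t rc = write(fd, str, len);       /* may be a short write */
                if (rc <= 0)
                        break;
                str += rc;
                len -= rc;
        }
}

int main(void)
{
        const char *msg = "hello from the emergency console path\n";
        write_all(STDOUT_FILENO, msg, strlen(msg));
        return 0;
}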
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 0f47f4697cdf..9ce3b3baf3a2 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -66,6 +66,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
        case XENKBD_TYPE_MOTION:
                input_report_rel(dev, REL_X, event->motion.rel_x);
                input_report_rel(dev, REL_Y, event->motion.rel_y);
+               if (event->motion.rel_z)
+                       input_report_rel(dev, REL_WHEEL,
+                                        -event->motion.rel_z);
                break;
        case XENKBD_TYPE_KEY:
                dev = NULL;
@@ -84,6 +87,9 @@ static irqreturn_t input_handler(int rq, void *dev_id)
        case XENKBD_TYPE_POS:
                input_report_abs(dev, ABS_X, event->pos.abs_x);
                input_report_abs(dev, ABS_Y, event->pos.abs_y);
+               if (event->pos.rel_z)
+                       input_report_rel(dev, REL_WHEEL,
+                                        -event->pos.rel_z);
                break;
        }
        if (dev)
@@ -152,7 +158,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
        ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
        for (i = BTN_LEFT; i <= BTN_TASK; i++)
                set_bit(i, ptr->keybit);
-       ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
+       ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
        input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
        input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
 
@@ -294,6 +300,16 @@ InitWait:
                 */
                if (dev->state != XenbusStateConnected)
                        goto InitWait; /* no InitWait seen yet, fudge it */
+
+               /* Set input abs params to match backend screen res */
+               if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                "width", "%d", &val) > 0)
+                       input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
+
+               if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                                "height", "%d", &val) > 0)
+                       input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
+
                break;
 
        case XenbusStateClosing:
@@ -337,4 +353,6 @@ static void __exit xenkbd_cleanup(void)
 module_init(xenkbd_init);
 module_exit(xenkbd_cleanup);
 
+MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vkbd");
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 005bd045d2eb..5faefeaf6790 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -136,7 +136,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * first step in the migration to the kernel types. pte_pfn is already defined
  * in the kernel. */
 #define pgd_flags(x)   (pgd_val(x) & ~PAGE_MASK)
-#define pte_flags(x)   (pte_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)     (pgd_val(x) >> PAGE_SHIFT)
 
 /* interrupts_and_traps.c: */
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 619a6f8d65a2..47ed39b52f9c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -18,6 +18,7 @@
  * frame buffer.
  */
 
+#include <linux/console.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
@@ -42,37 +43,68 @@ struct xenfb_info {
        struct xenfb_page *page;
        unsigned long *mfns;
        int update_wanted;      /* XENFB_TYPE_UPDATE wanted */
+       int feature_resize;     /* XENFB_TYPE_RESIZE ok */
+       struct xenfb_resize resize;     /* protected by resize_lock */
+       int resize_dpy;         /* ditto */
+       spinlock_t resize_lock;
 
        struct xenbus_device *xbdev;
 };
 
-static u32 xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
+#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
 
+enum { KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT };
+static int video[KPARAM_CNT] = { 2, XENFB_WIDTH, XENFB_HEIGHT };
+module_param_array(video, int, NULL, 0);
+MODULE_PARM_DESC(video,
+       "Video memory size in MB, width, height in pixels (default 2,800,600)");
+
+static void xenfb_make_preferred_console(void);
 static int xenfb_remove(struct xenbus_device *);
-static void xenfb_init_shared_page(struct xenfb_info *);
+static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
 static void xenfb_disconnect_backend(struct xenfb_info *);
 
+static void xenfb_send_event(struct xenfb_info *info,
+                            union xenfb_out_event *event)
+{
+       u32 prod;
+
+       prod = info->page->out_prod;
+       /* caller ensures !xenfb_queue_full() */
+       mb();                   /* ensure ring space available */
+       XENFB_OUT_RING_REF(info->page, prod) = *event;
+       wmb();                  /* ensure ring contents visible */
+       info->page->out_prod = prod + 1;
+
+       notify_remote_via_irq(info->irq);
+}
+
 static void xenfb_do_update(struct xenfb_info *info,
                            int x, int y, int w, int h)
 {
        union xenfb_out_event event;
-       u32 prod;
 
+       memset(&event, 0, sizeof(event));
        event.type = XENFB_TYPE_UPDATE;
        event.update.x = x;
        event.update.y = y;
        event.update.width = w;
        event.update.height = h;
 
-       prod = info->page->out_prod;
        /* caller ensures !xenfb_queue_full() */
-       mb();                   /* ensure ring space available */
-       XENFB_OUT_RING_REF(info->page, prod) = event;
-       wmb();                  /* ensure ring contents visible */
-       info->page->out_prod = prod + 1;
-
-       notify_remote_via_irq(info->irq);
+       xenfb_send_event(info, &event);
 }
 
+static void xenfb_do_resize(struct xenfb_info *info)
+{
+       union xenfb_out_event event;
+
+       memset(&event, 0, sizeof(event));
+       event.resize = info->resize;
+
+       /* caller ensures !xenfb_queue_full() */
+       xenfb_send_event(info, &event);
+}
+
 static int xenfb_queue_full(struct xenfb_info *info)
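
[Note] xenfb_send_event() above is the producer half of a shared ring: fill the slot, make the contents globally visible (the wmb()), and only then publish the new producer index and kick the backend. A self-contained model of that ordering using a C11 release store in place of the kernel barrier; the ring size and event layout are illustrative, not the fbif protocol's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_LEN 32

struct event { uint32_t type, x, y, w, h; };

static struct event ring[RING_LEN];
static _Atomic uint32_t out_prod, out_cons;

/* Producer: caller has already checked the ring is not full. */
static void send_event(const struct event *ev)
{
        uint32_t prod = atomic_load_explicit(&out_prod, memory_order_relaxed);

        ring[prod % RING_LEN] = *ev;                    /* fill the slot first      */
        atomic_store_explicit(&out_prod, prod + 1,      /* then publish the index;  */
                              memory_order_release);    /* plays the role of wmb()  */
        /* here the real driver notifies the backend via its event channel */
}

static int queue_full(void)
{
        return atomic_load_explicit(&out_prod, memory_order_relaxed) -
               atomic_load_explicit(&out_cons, memory_order_acquire) == RING_LEN;
}

int main(void)
{
        struct event ev = { .type = 1, .w = 800, .h = 600 };

        if (!queue_full())
                send_event(&ev);
        printf("producer index now %u\n", (unsigned)atomic_load(&out_prod));
        return 0;
}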
@@ -84,12 +116,28 @@ static int xenfb_queue_full(struct xenfb_info *info)
 	return prod - cons == XENFB_OUT_RING_LEN;
 }
 
+static void xenfb_handle_resize_dpy(struct xenfb_info *info)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&info->resize_lock, flags);
+       if (info->resize_dpy) {
+               if (!xenfb_queue_full(info)) {
+                       info->resize_dpy = 0;
+                       xenfb_do_resize(info);
+               }
+       }
+       spin_unlock_irqrestore(&info->resize_lock, flags);
+}
+
 static void xenfb_refresh(struct xenfb_info *info,
                          int x1, int y1, int w, int h)
 {
        unsigned long flags;
-       int y2 = y1 + h - 1;
        int x2 = x1 + w - 1;
+       int y2 = y1 + h - 1;
+
+       xenfb_handle_resize_dpy(info);
 
        if (!info->update_wanted)
                return;
@@ -222,6 +270,57 @@ static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
        return res;
 }
 
+static int
+xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct xenfb_info *xenfb_info;
+       int required_mem_len;
+
+       xenfb_info = info->par;
+
+       if (!xenfb_info->feature_resize) {
+               if (var->xres == video[KPARAM_WIDTH] &&
+                   var->yres == video[KPARAM_HEIGHT] &&
+                   var->bits_per_pixel == xenfb_info->page->depth) {
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       /* Can't resize past initial width and height */
+       if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
+               return -EINVAL;
+
+       required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
+       if (var->bits_per_pixel == xenfb_info->page->depth &&
+           var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
+           required_mem_len <= info->fix.smem_len) {
+               var->xres_virtual = var->xres;
+               var->yres_virtual = var->yres;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static int xenfb_set_par(struct fb_info *info)
+{
+       struct xenfb_info *xenfb_info;
+       unsigned long flags;
+
+       xenfb_info = info->par;
+
+       spin_lock_irqsave(&xenfb_info->resize_lock, flags);
+       xenfb_info->resize.type = XENFB_TYPE_RESIZE;
+       xenfb_info->resize.width = info->var.xres;
+       xenfb_info->resize.height = info->var.yres;
+       xenfb_info->resize.stride = info->fix.line_length;
+       xenfb_info->resize.depth = info->var.bits_per_pixel;
+       xenfb_info->resize.offset = 0;
+       xenfb_info->resize_dpy = 1;
+       spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
+       return 0;
+}
+
 static struct fb_ops xenfb_fb_ops = {
        .owner = THIS_MODULE,
        .fb_read = fb_sys_read,
@@ -230,6 +329,8 @@ static struct fb_ops xenfb_fb_ops = {
        .fb_fillrect = xenfb_fillrect,
        .fb_copyarea = xenfb_copyarea,
        .fb_imageblit = xenfb_imageblit,
+       .fb_check_var = xenfb_check_var,
+       .fb_set_par = xenfb_set_par,
 };
 
 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
@@ -258,6 +359,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
 {
        struct xenfb_info *info;
        struct fb_info *fb_info;
+       int fb_size;
+       int val;
        int ret;
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -265,18 +368,35 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }
+
+       /* Limit kernel param videoram amount to what is in xenstore */
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
+               if (val < video[KPARAM_MEM])
+                       video[KPARAM_MEM] = val;
+       }
+
+       /* If requested res does not fit in available memory, use default */
+       fb_size = video[KPARAM_MEM] * 1024 * 1024;
+       if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8
+           > fb_size) {
+               video[KPARAM_WIDTH] = XENFB_WIDTH;
+               video[KPARAM_HEIGHT] = XENFB_HEIGHT;
+               fb_size = XENFB_DEFAULT_FB_LEN;
+       }
+
        dev->dev.driver_data = info;
        info->xbdev = dev;
        info->irq = -1;
        info->x1 = info->y1 = INT_MAX;
        spin_lock_init(&info->dirty_lock);
+       spin_lock_init(&info->resize_lock);
 
-       info->fb = vmalloc(xenfb_mem_len);
+       info->fb = vmalloc(fb_size);
        if (info->fb == NULL)
                goto error_nomem;
-       memset(info->fb, 0, xenfb_mem_len);
+       memset(info->fb, 0, fb_size);
 
-       info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
        if (!info->mfns)
@@ -287,8 +407,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
        if (!info->page)
                goto error_nomem;
 
-       xenfb_init_shared_page(info);
-
        /* abusing framebuffer_alloc() to allocate pseudo_palette */
        fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
        if (fb_info == NULL)
@@ -301,9 +419,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
        fb_info->screen_base = info->fb;
 
        fb_info->fbops = &xenfb_fb_ops;
-       fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
-       fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
-       fb_info->var.bits_per_pixel = info->page->depth;
+       fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
+       fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
+       fb_info->var.bits_per_pixel = XENFB_DEPTH;
 
        fb_info->var.red = (struct fb_bitfield){16, 8, 0};
        fb_info->var.green = (struct fb_bitfield){8, 8, 0};
@@ -315,9 +433,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
        fb_info->var.vmode = FB_VMODE_NONINTERLACED;
 
317 fb_info->fix.visual = FB_VISUAL_TRUECOLOR; 435 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
318 fb_info->fix.line_length = info->page->line_length; 436 fb_info->fix.line_length = fb_info->var.xres * XENFB_DEPTH / 8;
319 fb_info->fix.smem_start = 0; 437 fb_info->fix.smem_start = 0;
320 fb_info->fix.smem_len = xenfb_mem_len; 438 fb_info->fix.smem_len = fb_size;
321 strcpy(fb_info->fix.id, "xen"); 439 strcpy(fb_info->fix.id, "xen");
322 fb_info->fix.type = FB_TYPE_PACKED_PIXELS; 440 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
323 fb_info->fix.accel = FB_ACCEL_NONE; 441 fb_info->fix.accel = FB_ACCEL_NONE;
@@ -334,6 +452,8 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
334 fb_info->fbdefio = &xenfb_defio; 452 fb_info->fbdefio = &xenfb_defio;
335 fb_deferred_io_init(fb_info); 453 fb_deferred_io_init(fb_info);
336 454
455 xenfb_init_shared_page(info, fb_info);
456
337 ret = register_framebuffer(fb_info); 457 ret = register_framebuffer(fb_info);
338 if (ret) { 458 if (ret) {
339 fb_deferred_io_cleanup(fb_info); 459 fb_deferred_io_cleanup(fb_info);
@@ -348,6 +468,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
348 if (ret < 0) 468 if (ret < 0)
349 goto error; 469 goto error;
350 470
471 xenfb_make_preferred_console();
351 return 0; 472 return 0;
352 473
353 error_nomem: 474 error_nomem:
@@ -358,12 +479,34 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
358 return ret; 479 return ret;
359} 480}
360 481
482static __devinit void
483xenfb_make_preferred_console(void)
484{
485 struct console *c;
486
487 if (console_set_on_cmdline)
488 return;
489
490 acquire_console_sem();
491 for (c = console_drivers; c; c = c->next) {
492 if (!strcmp(c->name, "tty") && c->index == 0)
493 break;
494 }
495 release_console_sem();
496 if (c) {
497 unregister_console(c);
498 c->flags |= CON_CONSDEV;
499 c->flags &= ~CON_PRINTBUFFER; /* don't print again */
500 register_console(c);
501 }
502}
503
361static int xenfb_resume(struct xenbus_device *dev) 504static int xenfb_resume(struct xenbus_device *dev)
362{ 505{
363 struct xenfb_info *info = dev->dev.driver_data; 506 struct xenfb_info *info = dev->dev.driver_data;
364 507
365 xenfb_disconnect_backend(info); 508 xenfb_disconnect_backend(info);
366 xenfb_init_shared_page(info); 509 xenfb_init_shared_page(info, info->fb_info);
367 return xenfb_connect_backend(dev, info); 510 return xenfb_connect_backend(dev, info);
368} 511}
369 512
@@ -391,20 +534,23 @@ static unsigned long vmalloc_to_mfn(void *address)
391 return pfn_to_mfn(vmalloc_to_pfn(address)); 534 return pfn_to_mfn(vmalloc_to_pfn(address));
392} 535}
393 536
394static void xenfb_init_shared_page(struct xenfb_info *info) 537static void xenfb_init_shared_page(struct xenfb_info *info,
538 struct fb_info *fb_info)
395{ 539{
396 int i; 540 int i;
541 int epd = PAGE_SIZE / sizeof(info->mfns[0]);
397 542
398 for (i = 0; i < info->nr_pages; i++) 543 for (i = 0; i < info->nr_pages; i++)
399 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE); 544 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
400 545
401 info->page->pd[0] = vmalloc_to_mfn(info->mfns); 546 for (i = 0; i * epd < info->nr_pages; i++)
402 info->page->pd[1] = 0; 547 info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
403 info->page->width = XENFB_WIDTH; 548
404 info->page->height = XENFB_HEIGHT; 549 info->page->width = fb_info->var.xres;
405 info->page->depth = XENFB_DEPTH; 550 info->page->height = fb_info->var.yres;
406 info->page->line_length = (info->page->depth / 8) * info->page->width; 551 info->page->depth = fb_info->var.bits_per_pixel;
407 info->page->mem_length = xenfb_mem_len; 552 info->page->line_length = fb_info->fix.line_length;
553 info->page->mem_length = fb_info->fix.smem_len;
408 info->page->in_cons = info->page->in_prod = 0; 554 info->page->in_cons = info->page->in_prod = 0;
409 info->page->out_cons = info->page->out_prod = 0; 555 info->page->out_cons = info->page->out_prod = 0;
410} 556}
@@ -504,6 +650,11 @@ InitWait:
504 val = 0; 650 val = 0;
505 if (val) 651 if (val)
506 info->update_wanted = 1; 652 info->update_wanted = 1;
653
654 if (xenbus_scanf(XBT_NIL, dev->otherend,
655 "feature-resize", "%d", &val) < 0)
656 val = 0;
657 info->feature_resize = val;
507 break; 658 break;
508 659
509 case XenbusStateClosing: 660 case XenbusStateClosing:
@@ -547,4 +698,6 @@ static void __exit xenfb_cleanup(void)
547module_init(xenfb_init); 698module_init(xenfb_init);
548module_exit(xenfb_cleanup); 699module_exit(xenfb_cleanup);
549 700
701MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
550MODULE_LICENSE("GPL"); 702MODULE_LICENSE("GPL");
703MODULE_ALIAS("xen:vfb");
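The fb_check_var/fb_set_par hooks added above are reached through the normal fbdev ioctl path, so the new resize support can be exercised from userspace with nothing Xen-specific. A minimal sketch follows, assuming /dev/fb0 is the xen-fbfront device, the backend advertises feature-resize, and 1024x768 fits inside the initial mode; none of this is part of the patch itself.

/* Illustrative userspace sketch, not part of the patch: resizing the Xen
 * virtual framebuffer through the standard fbdev interface.  The
 * FBIOGET/FBIOPUT_VSCREENINFO ioctls end up in xenfb_check_var() and
 * xenfb_set_par() above; /dev/fb0 and the 1024x768 mode are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0) {
		perror("fbdev");
		return 1;
	}

	var.xres = var.xres_virtual = 1024;	/* must not exceed the initial mode */
	var.yres = var.yres_virtual = 768;

	/* Rejected with -EINVAL unless the backend advertised feature-resize. */
	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
		perror("FBIOPUT_VSCREENINFO");

	close(fd);
	return 0;
}

FBIOPUT_VSCREENINFO first calls xenfb_check_var() to validate the mode and then xenfb_set_par(), which marks a resize pending; the XENFB_TYPE_RESIZE event itself goes out from xenfb_refresh() once the ring has room.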
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 37af04f1ffd9..363286c54290 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
1obj-y += grant-table.o features.o events.o 1obj-y += grant-table.o features.o events.o manage.o
2obj-y += xenbus/ 2obj-y += xenbus/
3obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 3obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
4obj-$(CONFIG_XEN_BALLOON) += balloon.o 4obj-$(CONFIG_XEN_BALLOON) += balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab25ba6cbbb9..591bc29b55f5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -225,7 +225,7 @@ static int increase_reservation(unsigned long nr_pages)
225 page = balloon_next_page(page); 225 page = balloon_next_page(page);
226 } 226 }
227 227
228 reservation.extent_start = (unsigned long)frame_list; 228 set_xen_guest_handle(reservation.extent_start, frame_list);
229 reservation.nr_extents = nr_pages; 229 reservation.nr_extents = nr_pages;
230 rc = HYPERVISOR_memory_op( 230 rc = HYPERVISOR_memory_op(
231 XENMEM_populate_physmap, &reservation); 231 XENMEM_populate_physmap, &reservation);
@@ -321,7 +321,7 @@ static int decrease_reservation(unsigned long nr_pages)
321 balloon_append(pfn_to_page(pfn)); 321 balloon_append(pfn_to_page(pfn));
322 } 322 }
323 323
324 reservation.extent_start = (unsigned long)frame_list; 324 set_xen_guest_handle(reservation.extent_start, frame_list);
325 reservation.nr_extents = nr_pages; 325 reservation.nr_extents = nr_pages;
326 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); 326 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
327 BUG_ON(ret != nr_pages); 327 BUG_ON(ret != nr_pages);
@@ -368,7 +368,7 @@ static void balloon_process(struct work_struct *work)
368} 368}
369 369
370/* Resets the Xen limit, sets new target, and kicks off processing. */ 370/* Resets the Xen limit, sets new target, and kicks off processing. */
371void balloon_set_new_target(unsigned long target) 371static void balloon_set_new_target(unsigned long target)
372{ 372{
373 /* No need for lock. Not read-modify-write updates. */ 373 /* No need for lock. Not read-modify-write updates. */
374 balloon_stats.hard_limit = ~0UL; 374 balloon_stats.hard_limit = ~0UL;
@@ -483,7 +483,7 @@ static int dealloc_pte_fn(
483 .extent_order = 0, 483 .extent_order = 0,
484 .domid = DOMID_SELF 484 .domid = DOMID_SELF
485 }; 485 };
486 reservation.extent_start = (unsigned long)&mfn; 486 set_xen_guest_handle(reservation.extent_start, &mfn);
487 set_pte_at(&init_mm, addr, pte, __pte_ma(0ull)); 487 set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
488 set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); 488 set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
489 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); 489 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -519,7 +519,7 @@ static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
519 .extent_order = 0, 519 .extent_order = 0,
520 .domid = DOMID_SELF 520 .domid = DOMID_SELF
521 }; 521 };
522 reservation.extent_start = (unsigned long)&gmfn; 522 set_xen_guest_handle(reservation.extent_start, &gmfn);
523 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, 523 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
524 &reservation); 524 &reservation);
525 if (ret == 1) 525 if (ret == 1)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4f0f22b020ea..73d78dc9b875 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unbind_from_irq(unsigned int irq)
355 355
356 spin_lock(&irq_mapping_update_lock); 356 spin_lock(&irq_mapping_update_lock);
357 357
358 if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) { 358 if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
359 close.port = evtchn; 359 close.port = evtchn;
360 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 360 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
361 BUG(); 361 BUG();
@@ -375,7 +375,7 @@ static void unbind_from_irq(unsigned int irq)
375 evtchn_to_irq[evtchn] = -1; 375 evtchn_to_irq[evtchn] = -1;
376 irq_info[irq] = IRQ_UNBOUND; 376 irq_info[irq] = IRQ_UNBOUND;
377 377
378 dynamic_irq_init(irq); 378 dynamic_irq_cleanup(irq);
379 } 379 }
380 380
381 spin_unlock(&irq_mapping_update_lock); 381 spin_unlock(&irq_mapping_update_lock);
@@ -557,6 +557,33 @@ out:
557 put_cpu(); 557 put_cpu();
558} 558}
559 559
560/* Rebind a new event channel to an existing irq. */
561void rebind_evtchn_irq(int evtchn, int irq)
562{
563 /* Make sure the irq is masked, since the new event channel
564 will also be masked. */
565 disable_irq(irq);
566
567 spin_lock(&irq_mapping_update_lock);
568
569 /* After resume the irq<->evtchn mappings are all cleared out */
570 BUG_ON(evtchn_to_irq[evtchn] != -1);
571 /* Expect irq to have been bound before,
572 so the bindcount should be non-0 */
573 BUG_ON(irq_bindcount[irq] == 0);
574
575 evtchn_to_irq[evtchn] = irq;
576 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
577
578 spin_unlock(&irq_mapping_update_lock);
579
580 /* new event channels are always bound to cpu 0 */
581 irq_set_affinity(irq, cpumask_of_cpu(0));
582
583 /* Unmask the event channel. */
584 enable_irq(irq);
585}
586
560/* Rebind an evtchn so that it gets delivered to a specific cpu */ 587/* Rebind an evtchn so that it gets delivered to a specific cpu */
561static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 588static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
562{ 589{
@@ -647,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
647 return ret; 674 return ret;
648} 675}
649 676
677static void restore_cpu_virqs(unsigned int cpu)
678{
679 struct evtchn_bind_virq bind_virq;
680 int virq, irq, evtchn;
681
682 for (virq = 0; virq < NR_VIRQS; virq++) {
683 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
684 continue;
685
686 BUG_ON(irq_info[irq].type != IRQT_VIRQ);
687 BUG_ON(irq_info[irq].index != virq);
688
689 /* Get a new binding from Xen. */
690 bind_virq.virq = virq;
691 bind_virq.vcpu = cpu;
692 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
693 &bind_virq) != 0)
694 BUG();
695 evtchn = bind_virq.port;
696
697 /* Record the new mapping. */
698 evtchn_to_irq[evtchn] = irq;
699 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
700 bind_evtchn_to_cpu(evtchn, cpu);
701
702 /* Ready for use. */
703 unmask_evtchn(evtchn);
704 }
705}
706
707static void restore_cpu_ipis(unsigned int cpu)
708{
709 struct evtchn_bind_ipi bind_ipi;
710 int ipi, irq, evtchn;
711
712 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
713 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
714 continue;
715
716 BUG_ON(irq_info[irq].type != IRQT_IPI);
717 BUG_ON(irq_info[irq].index != ipi);
718
719 /* Get a new binding from Xen. */
720 bind_ipi.vcpu = cpu;
721 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
722 &bind_ipi) != 0)
723 BUG();
724 evtchn = bind_ipi.port;
725
726 /* Record the new mapping. */
727 evtchn_to_irq[evtchn] = irq;
728 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
729 bind_evtchn_to_cpu(evtchn, cpu);
730
731 /* Ready for use. */
732 unmask_evtchn(evtchn);
733
734 }
735}
736
737void xen_irq_resume(void)
738{
739 unsigned int cpu, irq, evtchn;
740
741 init_evtchn_cpu_bindings();
742
743 /* New event-channel space is not 'live' yet. */
744 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
745 mask_evtchn(evtchn);
746
747 /* No IRQ <-> event-channel mappings. */
748 for (irq = 0; irq < NR_IRQS; irq++)
749 irq_info[irq].evtchn = 0; /* zap event-channel binding */
750
751 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
752 evtchn_to_irq[evtchn] = -1;
753
754 for_each_possible_cpu(cpu) {
755 restore_cpu_virqs(cpu);
756 restore_cpu_ipis(cpu);
757 }
758}
759
650static struct irq_chip xen_dynamic_chip __read_mostly = { 760static struct irq_chip xen_dynamic_chip __read_mostly = {
651 .name = "xen-dyn", 761 .name = "xen-dyn",
652 .mask = disable_dynirq, 762 .mask = disable_dynirq,
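For context on what restore_cpu_virqs() and restore_cpu_ipis() above are rebuilding: a per-VCPU VIRQ binding of the kind they re-establish might be set up roughly as below. bind_virq_to_irqhandler() and VIRQ_TIMER are existing interfaces; the handler and the "example-timer" name are invented for illustration.

/* Illustrative sketch only: a per-VCPU VIRQ binding of the kind that
 * restore_cpu_virqs() re-establishes after a save/restore.  The handler
 * and the "example-timer" name are made up for the example. */
#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/interface/xen.h>

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* periodic tick delivered through the per-VCPU event channel */
	return IRQ_HANDLED;
}

static int example_bind_timer_virq(unsigned int cpu)
{
	/* Returns the Linux irq number; the irq number survives a
	   save/restore because xen_irq_resume() rebinds the underlying
	   event channel rather than tearing the irq down. */
	return bind_virq_to_irqhandler(VIRQ_TIMER, cpu,
				       example_timer_interrupt,
				       IRQF_DISABLED | IRQF_PERCPU,
				       "example-timer", NULL);
}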
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 52b6b41b909d..e9e11168616a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -471,14 +471,14 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
471 return 0; 471 return 0;
472} 472}
473 473
474static int gnttab_resume(void) 474int gnttab_resume(void)
475{ 475{
476 if (max_nr_grant_frames() < nr_grant_frames) 476 if (max_nr_grant_frames() < nr_grant_frames)
477 return -ENOSYS; 477 return -ENOSYS;
478 return gnttab_map(0, nr_grant_frames - 1); 478 return gnttab_map(0, nr_grant_frames - 1);
479} 479}
480 480
481static int gnttab_suspend(void) 481int gnttab_suspend(void)
482{ 482{
483 arch_gnttab_unmap_shared(shared, nr_grant_frames); 483 arch_gnttab_unmap_shared(shared, nr_grant_frames);
484 return 0; 484 return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
new file mode 100644
index 000000000000..5b546e365f00
--- /dev/null
+++ b/drivers/xen/manage.c
@@ -0,0 +1,252 @@
1/*
2 * Handle external requests for shutdown, reboot and sysrq

3 */
4#include <linux/kernel.h>
5#include <linux/err.h>
6#include <linux/reboot.h>
7#include <linux/sysrq.h>
8#include <linux/stop_machine.h>
9#include <linux/freezer.h>
10
11#include <xen/xenbus.h>
12#include <xen/grant_table.h>
13#include <xen/events.h>
14#include <xen/hvc-console.h>
15#include <xen/xen-ops.h>
16
17#include <asm/xen/hypercall.h>
18#include <asm/xen/page.h>
19
20enum shutdown_state {
21 SHUTDOWN_INVALID = -1,
22 SHUTDOWN_POWEROFF = 0,
23 SHUTDOWN_SUSPEND = 2,
24 /* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
25 report a crash, not be instructed to crash!
26 HALT is the same as POWEROFF, as far as we're concerned. The tools use
27 the distinction when we return the reason code to them. */
28 SHUTDOWN_HALT = 4,
29};
30
31/* Ignore multiple shutdown requests. */
32static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
33
34#ifdef CONFIG_PM_SLEEP
35static int xen_suspend(void *data)
36{
37 int *cancelled = data;
38 int err;
39
40 BUG_ON(!irqs_disabled());
41
42 load_cr3(swapper_pg_dir);
43
44 err = device_power_down(PMSG_SUSPEND);
45 if (err) {
46 printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
47 err);
48 return err;
49 }
50
51 xen_mm_pin_all();
52 gnttab_suspend();
53 xen_pre_suspend();
54
55 /*
56 * This hypercall returns 1 if suspend was cancelled
57 * or the domain was merely checkpointed, and 0 if it
58 * is resuming in a new domain.
59 */
60 *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
61
62 xen_post_suspend(*cancelled);
63 gnttab_resume();
64 xen_mm_unpin_all();
65
66 device_power_up();
67
68 if (!*cancelled) {
69 xen_irq_resume();
70 xen_console_resume();
71 }
72
73 return 0;
74}
75
76static void do_suspend(void)
77{
78 int err;
79 int cancelled = 1;
80
81 shutting_down = SHUTDOWN_SUSPEND;
82
83#ifdef CONFIG_PREEMPT
84 /* If the kernel is preemptible, we need to freeze all the processes
85 to prevent them from being in the middle of a pagetable update
86 during suspend. */
87 err = freeze_processes();
88 if (err) {
89 printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
90 return;
91 }
92#endif
93
94 err = device_suspend(PMSG_SUSPEND);
95 if (err) {
96 printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
97 goto out;
98 }
99
100 printk("suspending xenbus...\n");
101 /* XXX use normal device tree? */
102 xenbus_suspend();
103
104 err = stop_machine_run(xen_suspend, &cancelled, 0);
105 if (err) {
106 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
107 goto out;
108 }
109
110 if (!cancelled)
111 xenbus_resume();
112 else
113 xenbus_suspend_cancel();
114
115 device_resume();
116
117 /* Make sure timer events get retriggered on all CPUs */
118 clock_was_set();
119out:
120#ifdef CONFIG_PREEMPT
121 thaw_processes();
122#endif
123 shutting_down = SHUTDOWN_INVALID;
124}
125#endif /* CONFIG_PM_SLEEP */
126
127static void shutdown_handler(struct xenbus_watch *watch,
128 const char **vec, unsigned int len)
129{
130 char *str;
131 struct xenbus_transaction xbt;
132 int err;
133
134 if (shutting_down != SHUTDOWN_INVALID)
135 return;
136
137 again:
138 err = xenbus_transaction_start(&xbt);
139 if (err)
140 return;
141
142 str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
143 /* Ignore read errors and empty reads. */
144 if (XENBUS_IS_ERR_READ(str)) {
145 xenbus_transaction_end(xbt, 1);
146 return;
147 }
148
149 xenbus_write(xbt, "control", "shutdown", "");
150
151 err = xenbus_transaction_end(xbt, 0);
152 if (err == -EAGAIN) {
153 kfree(str);
154 goto again;
155 }
156
157 if (strcmp(str, "poweroff") == 0 ||
158 strcmp(str, "halt") == 0) {
159 shutting_down = SHUTDOWN_POWEROFF;
160 orderly_poweroff(false);
161 } else if (strcmp(str, "reboot") == 0) {
162 shutting_down = SHUTDOWN_POWEROFF; /* ? */
163 ctrl_alt_del();
164#ifdef CONFIG_PM_SLEEP
165 } else if (strcmp(str, "suspend") == 0) {
166 do_suspend();
167#endif
168 } else {
169 printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
170 shutting_down = SHUTDOWN_INVALID;
171 }
172
173 kfree(str);
174}
175
176static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
177 unsigned int len)
178{
179 char sysrq_key = '\0';
180 struct xenbus_transaction xbt;
181 int err;
182
183 again:
184 err = xenbus_transaction_start(&xbt);
185 if (err)
186 return;
187 if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
188 printk(KERN_ERR "Unable to read sysrq code in "
189 "control/sysrq\n");
190 xenbus_transaction_end(xbt, 1);
191 return;
192 }
193
194 if (sysrq_key != '\0')
195 xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
196
197 err = xenbus_transaction_end(xbt, 0);
198 if (err == -EAGAIN)
199 goto again;
200
201 if (sysrq_key != '\0')
202 handle_sysrq(sysrq_key, NULL);
203}
204
205static struct xenbus_watch shutdown_watch = {
206 .node = "control/shutdown",
207 .callback = shutdown_handler
208};
209
210static struct xenbus_watch sysrq_watch = {
211 .node = "control/sysrq",
212 .callback = sysrq_handler
213};
214
215static int setup_shutdown_watcher(void)
216{
217 int err;
218
219 err = register_xenbus_watch(&shutdown_watch);
220 if (err) {
221 printk(KERN_ERR "Failed to set shutdown watcher\n");
222 return err;
223 }
224
225 err = register_xenbus_watch(&sysrq_watch);
226 if (err) {
227 printk(KERN_ERR "Failed to set sysrq watcher\n");
228 return err;
229 }
230
231 return 0;
232}
233
234static int shutdown_event(struct notifier_block *notifier,
235 unsigned long event,
236 void *data)
237{
238 setup_shutdown_watcher();
239 return NOTIFY_DONE;
240}
241
242static int __init setup_shutdown_event(void)
243{
244 static struct notifier_block xenstore_notifier = {
245 .notifier_call = shutdown_event
246 };
247 register_xenstore_notifier(&xenstore_notifier);
248
249 return 0;
250}
251
252subsys_initcall(setup_shutdown_event);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 6efbe3f29ca5..090c61ee8fd0 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -203,7 +203,6 @@ int xb_read(void *data, unsigned len)
203int xb_init_comms(void) 203int xb_init_comms(void)
204{ 204{
205 struct xenstore_domain_interface *intf = xen_store_interface; 205 struct xenstore_domain_interface *intf = xen_store_interface;
206 int err;
207 206
208 if (intf->req_prod != intf->req_cons) 207 if (intf->req_prod != intf->req_cons)
209 printk(KERN_ERR "XENBUS request ring is not quiescent " 208 printk(KERN_ERR "XENBUS request ring is not quiescent "
@@ -216,18 +215,20 @@ int xb_init_comms(void)
216 intf->rsp_cons = intf->rsp_prod; 215 intf->rsp_cons = intf->rsp_prod;
217 } 216 }
218 217
219 if (xenbus_irq) 218 if (xenbus_irq) {
220 unbind_from_irqhandler(xenbus_irq, &xb_waitq); 219 /* Already have an irq; assume we're resuming */
220 rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
221 } else {
222 int err;
223 err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
224 0, "xenbus", &xb_waitq);
225 if (err <= 0) {
226 printk(KERN_ERR "XENBUS request irq failed %i\n", err);
227 return err;
228 }
221 229
222 err = bind_evtchn_to_irqhandler( 230 xenbus_irq = err;
223 xen_store_evtchn, wake_waiting,
224 0, "xenbus", &xb_waitq);
225 if (err <= 0) {
226 printk(KERN_ERR "XENBUS request irq failed %i\n", err);
227 return err;
228 } 231 }
229 232
230 xenbus_irq = err;
231
232 return 0; 233 return 0;
233} 234}
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index dc936dddf161..a1e2b9470f25 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -160,6 +160,7 @@ static inline pteval_t native_pte_val(pte_t pte)
160#endif 160#endif
161 161
162#define pte_val(x) native_pte_val(x) 162#define pte_val(x) native_pte_val(x)
163#define pte_flags(x) native_pte_val(x)
163#define __pte(x) native_make_pte(x) 164#define __pte(x) native_make_pte(x)
164 165
165#endif /* CONFIG_PARAVIRT */ 166#endif /* CONFIG_PARAVIRT */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0f13b945e240..5ea37a48eecb 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -239,6 +239,7 @@ struct pv_mmu_ops {
239 unsigned long addr, pte_t *ptep); 239 unsigned long addr, pte_t *ptep);
240 240
241 pteval_t (*pte_val)(pte_t); 241 pteval_t (*pte_val)(pte_t);
242 pteval_t (*pte_flags)(pte_t);
242 pte_t (*make_pte)(pteval_t pte); 243 pte_t (*make_pte)(pteval_t pte);
243 244
244 pgdval_t (*pgd_val)(pgd_t); 245 pgdval_t (*pgd_val)(pgd_t);
@@ -996,6 +997,20 @@ static inline pteval_t pte_val(pte_t pte)
996 return ret; 997 return ret;
997} 998}
998 999
1000static inline pteval_t pte_flags(pte_t pte)
1001{
1002 pteval_t ret;
1003
1004 if (sizeof(pteval_t) > sizeof(long))
1005 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
1006 pte.pte, (u64)pte.pte >> 32);
1007 else
1008 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
1009 pte.pte);
1010
1011 return ret;
1012}
1013
999static inline pgd_t __pgd(pgdval_t val) 1014static inline pgd_t __pgd(pgdval_t val)
1000{ 1015{
1001 pgdval_t ret; 1016 pgdval_t ret;
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 97c271b2910b..47a852cb8c92 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -164,37 +164,37 @@ extern struct list_head pgd_list;
164 */ 164 */
165static inline int pte_dirty(pte_t pte) 165static inline int pte_dirty(pte_t pte)
166{ 166{
167 return pte_val(pte) & _PAGE_DIRTY; 167 return pte_flags(pte) & _PAGE_DIRTY;
168} 168}
169 169
170static inline int pte_young(pte_t pte) 170static inline int pte_young(pte_t pte)
171{ 171{
172 return pte_val(pte) & _PAGE_ACCESSED; 172 return pte_flags(pte) & _PAGE_ACCESSED;
173} 173}
174 174
175static inline int pte_write(pte_t pte) 175static inline int pte_write(pte_t pte)
176{ 176{
177 return pte_val(pte) & _PAGE_RW; 177 return pte_flags(pte) & _PAGE_RW;
178} 178}
179 179
180static inline int pte_file(pte_t pte) 180static inline int pte_file(pte_t pte)
181{ 181{
182 return pte_val(pte) & _PAGE_FILE; 182 return pte_flags(pte) & _PAGE_FILE;
183} 183}
184 184
185static inline int pte_huge(pte_t pte) 185static inline int pte_huge(pte_t pte)
186{ 186{
187 return pte_val(pte) & _PAGE_PSE; 187 return pte_flags(pte) & _PAGE_PSE;
188} 188}
189 189
190static inline int pte_global(pte_t pte) 190static inline int pte_global(pte_t pte)
191{ 191{
192 return pte_val(pte) & _PAGE_GLOBAL; 192 return pte_flags(pte) & _PAGE_GLOBAL;
193} 193}
194 194
195static inline int pte_exec(pte_t pte) 195static inline int pte_exec(pte_t pte)
196{ 196{
197 return !(pte_val(pte) & _PAGE_NX); 197 return !(pte_flags(pte) & _PAGE_NX);
198} 198}
199 199
200static inline int pte_special(pte_t pte) 200static inline int pte_special(pte_t pte)
@@ -305,7 +305,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
305 return __pgprot(preservebits | addbits); 305 return __pgprot(preservebits | addbits);
306} 306}
307 307
308#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK) 308#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK)
309 309
310#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) 310#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
311 311
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed35..2a4f9b41d684 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -176,9 +176,9 @@ HYPERVISOR_fpu_taskswitch(int set)
176} 176}
177 177
178static inline int 178static inline int
179HYPERVISOR_sched_op(int cmd, unsigned long arg) 179HYPERVISOR_sched_op(int cmd, void *arg)
180{ 180{
181 return _hypercall2(int, sched_op, cmd, arg); 181 return _hypercall2(int, sched_op_new, cmd, arg);
182} 182}
183 183
184static inline long 184static inline long
@@ -315,6 +315,13 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
315} 315}
316 316
317static inline void 317static inline void
318MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
319{
320 mcl->op = __HYPERVISOR_fpu_taskswitch;
321 mcl->args[0] = set;
322}
323
324static inline void
318MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 325MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
319 pte_t new_val, unsigned long flags) 326 pte_t new_val, unsigned long flags)
320{ 327{
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index baf3a4dce28c..377c04591c15 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -26,15 +26,20 @@ typedef struct xpaddr {
26#define FOREIGN_FRAME_BIT (1UL<<31) 26#define FOREIGN_FRAME_BIT (1UL<<31)
27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) 27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
28 28
29extern unsigned long *phys_to_machine_mapping; 29/* Maximum amount of memory we can handle in a domain in pages */
30#define MAX_DOMAIN_PAGES \
31 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
32
33
34extern unsigned long get_phys_to_machine(unsigned long pfn);
35extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
30 36
31static inline unsigned long pfn_to_mfn(unsigned long pfn) 37static inline unsigned long pfn_to_mfn(unsigned long pfn)
32{ 38{
33 if (xen_feature(XENFEAT_auto_translated_physmap)) 39 if (xen_feature(XENFEAT_auto_translated_physmap))
34 return pfn; 40 return pfn;
35 41
36 return phys_to_machine_mapping[(unsigned int)(pfn)] & 42 return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
37 ~FOREIGN_FRAME_BIT;
38} 43}
39 44
40static inline int phys_to_machine_mapping_valid(unsigned long pfn) 45static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
42 if (xen_feature(XENFEAT_auto_translated_physmap)) 47 if (xen_feature(XENFEAT_auto_translated_physmap))
43 return 1; 48 return 1;
44 49
45 return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); 50 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
46} 51}
47 52
48static inline unsigned long mfn_to_pfn(unsigned long mfn) 53static inline unsigned long mfn_to_pfn(unsigned long mfn)
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
106 unsigned long pfn = mfn_to_pfn(mfn); 111 unsigned long pfn = mfn_to_pfn(mfn);
107 if ((pfn < max_mapnr) 112 if ((pfn < max_mapnr)
108 && !xen_feature(XENFEAT_auto_translated_physmap) 113 && !xen_feature(XENFEAT_auto_translated_physmap)
109 && (phys_to_machine_mapping[pfn] != mfn)) 114 && (get_phys_to_machine(pfn) != mfn))
110 return max_mapnr; /* force !pfn_valid() */ 115 return max_mapnr; /* force !pfn_valid() */
116 /* XXX fixme; not true with sparsemem */
111 return pfn; 117 return pfn;
112} 118}
113 119
114static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
115{
116 if (xen_feature(XENFEAT_auto_translated_physmap)) {
117 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
118 return;
119 }
120 phys_to_machine_mapping[pfn] = mfn;
121}
122
123/* VIRT <-> MACHINE conversion */ 120/* VIRT <-> MACHINE conversion */
124#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) 121#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
125#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v)))) 122#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
@@ -150,13 +147,9 @@ static inline pte_t __pte_ma(pteval_t x)
150 return (pte_t) { .pte = x }; 147 return (pte_t) { .pte = x };
151} 148}
152 149
153#ifdef CONFIG_X86_PAE
154#define pmd_val_ma(v) ((v).pmd) 150#define pmd_val_ma(v) ((v).pmd)
155#define pud_val_ma(v) ((v).pgd.pgd) 151#define pud_val_ma(v) ((v).pgd.pgd)
156#define __pmd_ma(x) ((pmd_t) { (x) } ) 152#define __pmd_ma(x) ((pmd_t) { (x) } )
157#else /* !X86_PAE */
158#define pmd_val_ma(v) ((v).pud.pgd.pgd)
159#endif /* CONFIG_X86_PAE */
160 153
161#define pgd_val_ma(x) ((x).pgd) 154#define pgd_val_ma(x) ((x).pgd)
162 155
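get_phys_to_machine() and set_phys_to_machine() replace direct indexing of the old flat phys_to_machine_mapping[] array. A minimal sketch of the idea behind the out-of-line accessors, assuming a two-level table sized from MAX_DOMAIN_PAGES; the real implementation lives in arch/x86/xen/mmu.c and may differ in detail.

/* Minimal sketch, not the patch itself: a p2m table split into per-page
 * chunks so that only populated ranges need backing store.  Sizes follow
 * MAX_DOMAIN_PAGES above; names prefixed ex_/example_ are invented. */
#include <asm/page.h>
#include <asm/xen/page.h>

#define EX_P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define EX_P2M_TOP_ENTRIES	(MAX_DOMAIN_PAGES / EX_P2M_ENTRIES_PER_PAGE)

static unsigned long *ex_p2m_top[EX_P2M_TOP_ENTRIES];

unsigned long example_get_phys_to_machine(unsigned long pfn)
{
	unsigned long *chunk;

	if (pfn >= MAX_DOMAIN_PAGES)
		return INVALID_P2M_ENTRY;

	chunk = ex_p2m_top[pfn / EX_P2M_ENTRIES_PER_PAGE];
	if (chunk == NULL)
		return INVALID_P2M_ENTRY;	/* range never populated */

	return chunk[pfn % EX_P2M_ENTRIES_PER_PAGE];
}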
diff --git a/include/linux/console.h b/include/linux/console.h
index a4f27fbdf549..248e6e3b9b73 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -108,6 +108,8 @@ struct console {
108 struct console *next; 108 struct console *next;
109}; 109};
110 110
111extern int console_set_on_cmdline;
112
111extern int add_preferred_console(char *name, int idx, char *options); 113extern int add_preferred_console(char *name, int idx, char *options);
112extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); 114extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
113extern void register_console(struct console *); 115extern void register_console(struct console *);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f31debfac926..0d2a4e7012aa 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -157,6 +157,7 @@ PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
157__PAGEFLAG(Slab, slab) 157__PAGEFLAG(Slab, slab)
158PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ 158PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */
159PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ 159PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
160PAGEFLAG(SavePinned, dirty); /* Xen */
160PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 161PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
161PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) 162PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
162 __SETPAGEFLAG(Private, private) 163 __SETPAGEFLAG(Private, private)
diff --git a/include/xen/events.h b/include/xen/events.h
index acd8e062c85f..67c4436554a9 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -32,6 +32,7 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id);
32 32
33void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); 33void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
34int resend_irq_on_evtchn(unsigned int irq); 34int resend_irq_on_evtchn(unsigned int irq);
35void rebind_evtchn_irq(int evtchn, int irq);
35 36
36static inline void notify_remote_via_evtchn(int port) 37static inline void notify_remote_via_evtchn(int port)
37{ 38{
@@ -40,4 +41,7 @@ static inline void notify_remote_via_evtchn(int port)
40} 41}
41 42
42extern void notify_remote_via_irq(int irq); 43extern void notify_remote_via_irq(int irq);
44
45extern void xen_irq_resume(void);
46
43#endif /* _XEN_EVENTS_H */ 47#endif /* _XEN_EVENTS_H */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 466204846121..a40f1cd91be1 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -51,6 +51,9 @@ struct gnttab_free_callback {
51 u16 count; 51 u16 count;
52}; 52};
53 53
54int gnttab_suspend(void);
55int gnttab_resume(void);
56
54int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, 57int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
55 int readonly); 58 int readonly);
56 59
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h
index 21c0ecfd786d..98b79bc404dd 100644
--- a/include/xen/hvc-console.h
+++ b/include/xen/hvc-console.h
@@ -3,4 +3,13 @@
3 3
4extern struct console xenboot_console; 4extern struct console xenboot_console;
5 5
6#ifdef CONFIG_HVC_XEN
7void xen_console_resume(void);
8#else
9static inline void xen_console_resume(void) { }
10#endif
11
12void xen_raw_console_write(const char *str);
13void xen_raw_printk(const char *fmt, ...);
14
6#endif /* XEN_HVC_CONSOLE_H */ 15#endif /* XEN_HVC_CONSOLE_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index a64d3df5bd95..7a8262c375cc 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -120,6 +120,26 @@
120 */ 120 */
121#define XEN_ELFNOTE_BSD_SYMTAB 11 121#define XEN_ELFNOTE_BSD_SYMTAB 11
122 122
123/*
124 * The lowest address the hypervisor hole can begin at (numeric).
125 *
126 * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
127 * also indicates to the hypervisor that the kernel can deal with the
128 * hole starting at a higher address.
129 */
130#define XEN_ELFNOTE_HV_START_LOW 12
131
132/*
133 * List of maddr_t-sized mask/value pairs describing how to recognize
134 * (non-present) L1 page table entries carrying valid MFNs (numeric).
135 */
136#define XEN_ELFNOTE_L1_MFN_VALID 13
137
138/*
139 * Whether or not the guest supports cooperative suspend cancellation.
140 */
141#define XEN_ELFNOTE_SUSPEND_CANCEL 14
142
123#endif /* __XEN_PUBLIC_ELFNOTE_H__ */ 143#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
124 144
125/* 145/*
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
index 5a934dd7796d..974a51ed9165 100644
--- a/include/xen/interface/io/fbif.h
+++ b/include/xen/interface/io/fbif.h
@@ -49,11 +49,27 @@ struct xenfb_update {
49 int32_t height; /* rect height */ 49 int32_t height; /* rect height */
50}; 50};
51 51
52/*
53 * Framebuffer resize notification event
54 * Capable backend sets feature-resize in xenstore.
55 */
56#define XENFB_TYPE_RESIZE 3
57
58struct xenfb_resize {
59 uint8_t type; /* XENFB_TYPE_RESIZE */
60 int32_t width; /* width in pixels */
61 int32_t height; /* height in pixels */
62 int32_t stride; /* stride in bytes */
63 int32_t depth; /* depth in bits */
64 int32_t offset; /* start offset within framebuffer */
65};
66
52#define XENFB_OUT_EVENT_SIZE 40 67#define XENFB_OUT_EVENT_SIZE 40
53 68
54union xenfb_out_event { 69union xenfb_out_event {
55 uint8_t type; 70 uint8_t type;
56 struct xenfb_update update; 71 struct xenfb_update update;
72 struct xenfb_resize resize;
57 char pad[XENFB_OUT_EVENT_SIZE]; 73 char pad[XENFB_OUT_EVENT_SIZE];
58}; 74};
59 75
@@ -105,15 +121,18 @@ struct xenfb_page {
105 * Each directory page holds PAGE_SIZE / sizeof(*pd) 121 * Each directory page holds PAGE_SIZE / sizeof(*pd)
106 * framebuffer pages, and can thus map up to PAGE_SIZE * 122 * framebuffer pages, and can thus map up to PAGE_SIZE *
107 * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and 123 * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
108 * sizeof(unsigned long) == 4, that's 4 Megs. Two directory 124 * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2
109 * pages should be enough for a while. 125 * Megs 64 bit. 256 directories give enough room for a 512
126 * Meg framebuffer with a max resolution of 12,800x10,240.
127 * Should be enough for a while with room leftover for
128 * expansion.
110 */ 129 */
111 unsigned long pd[2]; 130 unsigned long pd[256];
112}; 131};
113 132
114/* 133/*
115 * Wart: xenkbd needs to know resolution. Put it here until a better 134 * Wart: xenkbd needs to know default resolution. Put it here until a
116 * solution is found, but don't leak it to the backend. 135 * better solution is found, but don't leak it to the backend.
117 */ 136 */
118#ifdef __KERNEL__ 137#ifdef __KERNEL__
119#define XENFB_WIDTH 800 138#define XENFB_WIDTH 800
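Spelling out the arithmetic behind the enlarged pd[] array in the comment above (illustrative figures only, using the PAGE_SIZE and pointer sizes quoted there):

/* Worked numbers for the pd[256] sizing above (illustrative only).
 * Each directory page holds PAGE_SIZE / sizeof(unsigned long) MFNs,
 * each naming one PAGE_SIZE framebuffer page. */
#define EX_PAGE_SIZE		4096UL
#define EX_PTRS_PER_DIR_32	(EX_PAGE_SIZE / 4)	/* 1024 entries */
#define EX_PTRS_PER_DIR_64	(EX_PAGE_SIZE / 8)	/*  512 entries */

/* Per directory page: 1024 * 4 KiB = 4 MiB (32 bit), 512 * 4 KiB = 2 MiB
 * (64 bit).  With 256 directory pages that is 1 GiB mappable on 32 bit
 * and 512 MiB on 64 bit.  12,800 x 10,240 at 32 bpp needs
 * 12800 * 10240 * 4 = 500 MiB, which is why 512 MiB (the 64-bit case)
 * is quoted as the limit. */
#define EX_MAX_FB_BYTES_64	(256 * EX_PTRS_PER_DIR_64 * EX_PAGE_SIZE)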
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index fb97f4284ffd..8066c7849fbe 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -49,6 +49,7 @@ struct xenkbd_motion {
49 uint8_t type; /* XENKBD_TYPE_MOTION */ 49 uint8_t type; /* XENKBD_TYPE_MOTION */
50 int32_t rel_x; /* relative X motion */ 50 int32_t rel_x; /* relative X motion */
51 int32_t rel_y; /* relative Y motion */ 51 int32_t rel_y; /* relative Y motion */
52 int32_t rel_z; /* relative Z motion (wheel) */
52}; 53};
53 54
54struct xenkbd_key { 55struct xenkbd_key {
@@ -61,6 +62,7 @@ struct xenkbd_position {
61 uint8_t type; /* XENKBD_TYPE_POS */ 62 uint8_t type; /* XENKBD_TYPE_POS */
62 int32_t abs_x; /* absolute X position (in FB pixels) */ 63 int32_t abs_x; /* absolute X position (in FB pixels) */
63 int32_t abs_y; /* absolute Y position (in FB pixels) */ 64 int32_t abs_y; /* absolute Y position (in FB pixels) */
65 int32_t rel_z; /* relative Z motion (wheel) */
64}; 66};
65 67
66#define XENKBD_IN_EVENT_SIZE 40 68#define XENKBD_IN_EVENT_SIZE 40
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index da768469aa92..af36ead16817 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -29,7 +29,7 @@ struct xen_memory_reservation {
29 * OUT: GMFN bases of extents that were allocated 29 * OUT: GMFN bases of extents that were allocated
30 * (NB. This command also updates the mach_to_phys translation table) 30 * (NB. This command also updates the mach_to_phys translation table)
31 */ 31 */
32 ulong extent_start; 32 GUEST_HANDLE(ulong) extent_start;
33 33
34 /* Number of extents, and size/alignment of each (2^extent_order pages). */ 34 /* Number of extents, and size/alignment of each (2^extent_order pages). */
35 unsigned long nr_extents; 35 unsigned long nr_extents;
@@ -50,6 +50,7 @@ struct xen_memory_reservation {
50 domid_t domid; 50 domid_t domid;
51 51
52}; 52};
53DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
53 54
54/* 55/*
55 * Returns the maximum machine frame number of mapped RAM in this system. 56 * Returns the maximum machine frame number of mapped RAM in this system.
@@ -85,7 +86,7 @@ struct xen_machphys_mfn_list {
85 * any large discontiguities in the machine address space, 2MB gaps in 86 * any large discontiguities in the machine address space, 2MB gaps in
86 * the machphys table will be represented by an MFN base of zero. 87 * the machphys table will be represented by an MFN base of zero.
87 */ 88 */
88 ulong extent_start; 89 GUEST_HANDLE(ulong) extent_start;
89 90
90 /* 91 /*
91 * Number of extents written to the above array. This will be smaller 92 * Number of extents written to the above array. This will be smaller
@@ -93,6 +94,7 @@ struct xen_machphys_mfn_list {
93 */ 94 */
94 unsigned int nr_extents; 95 unsigned int nr_extents;
95}; 96};
97DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
96 98
97/* 99/*
98 * Sets the GPFN at which a particular page appears in the specified guest's 100 * Sets the GPFN at which a particular page appears in the specified guest's
@@ -115,6 +117,7 @@ struct xen_add_to_physmap {
115 /* GPFN where the source mapping page should appear. */ 117 /* GPFN where the source mapping page should appear. */
116 unsigned long gpfn; 118 unsigned long gpfn;
117}; 119};
120DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
118 121
119/* 122/*
120 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error 123 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
@@ -129,13 +132,14 @@ struct xen_translate_gpfn_list {
129 unsigned long nr_gpfns; 132 unsigned long nr_gpfns;
130 133
131 /* List of GPFNs to translate. */ 134 /* List of GPFNs to translate. */
132 ulong gpfn_list; 135 GUEST_HANDLE(ulong) gpfn_list;
133 136
134 /* 137 /*
135 * Output list to contain MFN translations. May be the same as the input 138 * Output list to contain MFN translations. May be the same as the input
136 * list (in which case each input GPFN is overwritten with the output MFN). 139 * list (in which case each input GPFN is overwritten with the output MFN).
137 */ 140 */
138 ulong mfn_list; 141 GUEST_HANDLE(ulong) mfn_list;
139}; 142};
143DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
140 144
141#endif /* __XEN_PUBLIC_MEMORY_H__ */ 145#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 10ddfe0142d0..a706d6a78960 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,4 +5,10 @@
5 5
6DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 6DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
7 7
8void xen_pre_suspend(void);
9void xen_post_suspend(int suspend_cancelled);
10
11void xen_mm_pin_all(void);
12void xen_mm_unpin_all(void);
13
8#endif /* INCLUDE_XEN_OPS_H */ 14#endif /* INCLUDE_XEN_OPS_H */
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fb01c32aa3b..028ed75d4864 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -121,6 +121,8 @@ struct console_cmdline
121static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 121static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
122static int selected_console = -1; 122static int selected_console = -1;
123static int preferred_console = -1; 123static int preferred_console = -1;
124int console_set_on_cmdline;
125EXPORT_SYMBOL(console_set_on_cmdline);
124 126
125/* Flag: console code may call schedule() */ 127/* Flag: console code may call schedule() */
126static int console_may_schedule; 128static int console_may_schedule;
@@ -890,6 +892,7 @@ static int __init console_setup(char *str)
890 *s = 0; 892 *s = 0;
891 893
892 __add_preferred_console(buf, idx, options, brl_options); 894 __add_preferred_console(buf, idx, options, brl_options);
895 console_set_on_cmdline = 1;
893 return 1; 896 return 1;
894} 897}
895__setup("console=", console_setup); 898__setup("console=", console_setup);