author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /arch/x86/xen
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/x86/xen')

-rw-r--r--  arch/x86/xen/debugfs.c    |  1
-rw-r--r--  arch/x86/xen/enlighten.c  | 49
-rw-r--r--  arch/x86/xen/mmu.c        | 24
-rw-r--r--  arch/x86/xen/smp.c        | 47
-rw-r--r--  arch/x86/xen/spinlock.c   | 17
-rw-r--r--  arch/x86/xen/suspend.c    | 17
-rw-r--r--  arch/x86/xen/time.c       | 32
-rw-r--r--  arch/x86/xen/xen-asm_32.S |  4
-rw-r--r--  arch/x86/xen/xen-asm_64.S |  4
-rw-r--r--  arch/x86/xen/xen-ops.h    |  2

10 files changed, 108 insertions, 89 deletions
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index e133ce25e290..1304bcec8ee5 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -1,5 +1,6 @@
 #include <linux/init.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 
 #include "debugfs.h"
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dfbf70e65860..65d8d79b46a8 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -27,7 +27,10 @@
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
 #include <linux/console.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>
 #include <xen/interface/physdev.h>
@@ -48,6 +51,7 @@
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
@@ -138,24 +142,23 @@ static void xen_vcpu_setup(int cpu)
  */
 void xen_vcpu_restore(void)
 {
-        if (have_vcpu_info_placement) {
-                int cpu;
+        int cpu;
 
-                for_each_online_cpu(cpu) {
-                        bool other_cpu = (cpu != smp_processor_id());
+        for_each_online_cpu(cpu) {
+                bool other_cpu = (cpu != smp_processor_id());
 
-                        if (other_cpu &&
-                            HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
-                                BUG();
+                if (other_cpu &&
+                    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
+                        BUG();
 
-                        xen_vcpu_setup(cpu);
+                xen_setup_runstate_info(cpu);
 
-                        if (other_cpu &&
-                            HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
-                                BUG();
-                }
+                if (have_vcpu_info_placement)
+                        xen_vcpu_setup(cpu);
 
-                BUG_ON(!have_vcpu_info_placement);
+                if (other_cpu &&
+                    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+                        BUG();
         }
 }
 
@@ -1093,10 +1096,14 @@ asmlinkage void __init xen_start_kernel(void)
 
         __supported_pte_mask |= _PAGE_IOMAP;
 
-#ifdef CONFIG_X86_64
+        /*
+         * Prevent page tables from being allocated in highmem, even
+         * if CONFIG_HIGHPTE is enabled.
+         */
+        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
         /* Work out if we support NX */
-        check_efer();
-#endif
+        x86_configure_nx();
 
         xen_setup_features();
 
@@ -1152,9 +1159,13 @@ asmlinkage void __init xen_start_kernel(void)
 
         /* keep using Xen gdt for now; no urgent need to change it */
 
+#ifdef CONFIG_X86_32
         pv_info.kernel_rpl = 1;
         if (xen_feature(XENFEAT_supervisor_mode_kernel))
                 pv_info.kernel_rpl = 0;
+#else
+        pv_info.kernel_rpl = 0;
+#endif
 
         /* set the limit of our address space */
         xen_reserve_top();
@@ -1178,10 +1189,16 @@ asmlinkage void __init xen_start_kernel(void)
                 add_preferred_console("xenboot", 0, NULL);
                 add_preferred_console("tty", 0, NULL);
                 add_preferred_console("hvc", 0, NULL);
+        } else {
+                /* Make sure ACS will be enabled */
+                pci_request_acs();
         }
+
 
         xen_raw_console_write("about to get started...\n");
 
+        xen_setup_runstate_info(0);
+
         /* Start the world */
 #ifdef CONFIG_X86_32
         i386_start_kernel();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3bf7b1d250ce..914f04695ce5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -43,6 +43,7 @@
 #include <linux/debugfs.h>
 #include <linux/bug.h>
 #include <linux/module.h>
+#include <linux/gfp.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -185,7 +186,7 @@ static inline unsigned p2m_index(unsigned long pfn)
 }
 
 /* Build the parallel p2m_top_mfn structures */
-static void __init xen_build_mfn_list_list(void)
+void xen_build_mfn_list_list(void)
 {
         unsigned pfn, idx;
 
@@ -1427,23 +1428,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-        pgprot_t prot = PAGE_KERNEL;
-
-        if (PagePinned(page))
-                prot = PAGE_KERNEL_RO;
-
-        if (0 && PageHighMem(page))
-                printk("mapping highpte %lx type %d prot %s\n",
-                       page_to_pfn(page), type,
-                       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-        return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
 #ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
@@ -1902,10 +1886,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
         .alloc_pmd_clone = paravirt_nop,
         .release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_HIGHPTE
-        .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
 #ifdef CONFIG_X86_64
         .set_pte = xen_set_pte,
 #else
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fe03eeed7b48..a29693fd3138 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -14,6 +14,7 @@
  */
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 
 #include <asm/paravirt.h>
@@ -35,10 +36,10 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -73,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 
         xen_setup_cpu_clockevents();
 
-        cpu_set(cpu, cpu_online_map);
+        set_cpu_online(cpu, true);
         percpu_write(cpu_state, CPU_ONLINE);
         wmb();
 
@@ -103,7 +104,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                     NULL);
         if (rc < 0)
                 goto fail;
-        per_cpu(resched_irq, cpu) = rc;
+        per_cpu(xen_resched_irq, cpu) = rc;
 
         callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
         rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +115,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                     NULL);
         if (rc < 0)
                 goto fail;
-        per_cpu(callfunc_irq, cpu) = rc;
+        per_cpu(xen_callfunc_irq, cpu) = rc;
 
         debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
         rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +123,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                      debug_name, NULL);
         if (rc < 0)
                 goto fail;
-        per_cpu(debug_irq, cpu) = rc;
+        per_cpu(xen_debug_irq, cpu) = rc;
 
         callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
         rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +134,20 @@ static int xen_smp_intr_init(unsigned int cpu)
                                     NULL);
         if (rc < 0)
                 goto fail;
-        per_cpu(callfuncsingle_irq, cpu) = rc;
+        per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
         return 0;
 
 fail:
-        if (per_cpu(resched_irq, cpu) >= 0)
-                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-        if (per_cpu(callfunc_irq, cpu) >= 0)
-                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-        if (per_cpu(debug_irq, cpu) >= 0)
-                unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-        if (per_cpu(callfuncsingle_irq, cpu) >= 0)
-                unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+        if (per_cpu(xen_resched_irq, cpu) >= 0)
+                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+        if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+        if (per_cpu(xen_debug_irq, cpu) >= 0)
+                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+                                       NULL);
 
         return rc;
 }
@@ -295,6 +297,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
                 (unsigned long)task_stack_page(idle) -
                 KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
+        xen_setup_runstate_info(cpu);
         xen_setup_timer(cpu);
         xen_init_lock_cpu(cpu);
 
@@ -348,10 +351,10 @@ static void xen_cpu_die(unsigned int cpu)
                 current->state = TASK_UNINTERRUPTIBLE;
                 schedule_timeout(HZ/10);
         }
-        unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-        unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-        unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-        unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
         xen_uninit_lock_cpu(cpu);
         xen_teardown_timer(cpu);
 
@@ -359,7 +362,7 @@ static void xen_cpu_die(unsigned int cpu)
         alternatives_smp_switch(0);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
+static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
         play_dead_common();
         HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..e0500646585d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -6,6 +6,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/log2.h>
+#include <linux/gfp.h>
 
 #include <asm/paravirt.h>
 
@@ -120,14 +121,14 @@ struct xen_spinlock {
         unsigned short spinners;  /* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
         return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -136,7 +137,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
         return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
         u8 old = 1;
@@ -181,7 +182,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
         __get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
         struct xen_spinlock *prev;
@@ -254,7 +255,7 @@ out:
         return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
         unsigned timeout;
@@ -291,12 +292,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
         spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
         __xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
         __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@@ -317,7 +318,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
         }
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 95be7b434724..987267f79bf5 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,4 +1,5 @@
 #include <linux/types.h>
+#include <linux/clockchips.h>
 
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
@@ -27,6 +28,8 @@ void xen_pre_suspend(void)
 
 void xen_post_suspend(int suspend_cancelled)
 {
+        xen_build_mfn_list_list();
+
         xen_setup_shared_info();
 
         if (suspend_cancelled) {
@@ -44,7 +47,19 @@ void xen_post_suspend(int suspend_cancelled)
 
 }
 
+static void xen_vcpu_notify_restore(void *data)
+{
+        unsigned long reason = (unsigned long)data;
+
+        /* Boot processor notified via generic timekeeping_resume() */
+        if ( smp_processor_id() == 0)
+                return;
+
+        clockevents_notify(reason, NULL);
+}
+
 void xen_arch_resume(void)
 {
-        /* nothing */
+        smp_call_function(xen_vcpu_notify_restore,
+                          (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
 }
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0a5aa44299a5..32764b8880b5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -13,6 +13,7 @@
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
+#include <linux/gfp.h>
 
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
@@ -31,14 +32,14 @@
 #define NS_PER_TICK (1000000000LL / HZ)
 
 /* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 
 /* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
 /* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -79,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
         BUG_ON(preemptible());
 
-        state = &__get_cpu_var(runstate);
+        state = &__get_cpu_var(xen_runstate);
 
         /*
          * The runstate info is always updated by the hypervisor on
@@ -97,14 +98,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 /* return true when a vcpu could run but has no real cpu to run on */
 bool xen_vcpu_stolen(int vcpu)
 {
-        return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+        return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
 }
 
-static void setup_runstate_info(int cpu)
+void xen_setup_runstate_info(int cpu)
 {
         struct vcpu_register_runstate_memory_area area;
 
-        area.addr.v = &per_cpu(runstate, cpu);
+        area.addr.v = &per_cpu(xen_runstate, cpu);
 
         if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                                cpu, &area))
@@ -122,7 +123,7 @@ static void do_stolen_accounting(void)
 
         WARN_ON(state.state != RUNSTATE_running);
 
-        snap = &__get_cpu_var(runstate_snapshot);
+        snap = &__get_cpu_var(xen_runstate_snapshot);
 
         /* work out how much time the VCPU has not been runn*ing* */
         blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +134,24 @@ static void do_stolen_accounting(void)
 
         /* Add the appropriate number of ticks of stolen time,
            including any left-overs from last time. */
-        stolen = runnable + offline + __get_cpu_var(residual_stolen);
+        stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
 
         if (stolen < 0)
                 stolen = 0;
 
         ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-        __get_cpu_var(residual_stolen) = stolen;
+        __get_cpu_var(xen_residual_stolen) = stolen;
         account_steal_ticks(ticks);
 
         /* Add the appropriate number of ticks of blocked time,
            including any left-overs from last time. */
-        blocked += __get_cpu_var(residual_blocked);
+        blocked += __get_cpu_var(xen_residual_blocked);
 
         if (blocked < 0)
                 blocked = 0;
 
         ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-        __get_cpu_var(residual_blocked) = blocked;
+        __get_cpu_var(xen_residual_blocked) = blocked;
         account_idle_ticks(ticks);
 }
 
@@ -434,7 +435,7 @@ void xen_setup_timer(int cpu)
                 name = "<timer kasprintf failed>";
 
         irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-                                      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+                                      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
                                       name, NULL);
 
         evt = &per_cpu(xen_clock_events, cpu);
@@ -442,8 +443,6 @@ void xen_setup_timer(int cpu)
 
         evt->cpumask = cpumask_of(cpu);
         evt->irq = irq;
-
-        setup_runstate_info(cpu);
 }
 
 void xen_teardown_timer(int cpu)
@@ -494,6 +493,7 @@ __init void xen_time_init(void)
 
         setup_force_cpu_cap(X86_FEATURE_TSC);
 
+        xen_setup_runstate_info(cpu);
         xen_setup_timer(cpu);
         xen_setup_cpu_clockevents();
 }
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15deb8b82..22a2093b5862 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
         GET_THREAD_INFO(%eax)
         movl TI_cpu(%eax), %eax
         movl __per_cpu_offset(,%eax,4), %eax
-        mov per_cpu__xen_vcpu(%eax), %eax
+        mov xen_vcpu(%eax), %eax
 #else
-        movl per_cpu__xen_vcpu, %eax
+        movl xen_vcpu, %eax
 #endif
 
         /* check IF state we're restoring */
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 02f496a8dbaa..53adefda4275 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -96,7 +96,7 @@ ENTRY(xen_sysret32)
         pushq $__USER32_CS
         pushq %rcx
 
-        pushq $VGCF_in_syscall
+        pushq $0
 1:      jmp hypercall_iret
 ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
@@ -151,7 +151,7 @@ ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
         lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
         mov $-ENOSYS, %rax
-        pushq $VGCF_in_syscall
+        pushq $0
         jmp hypercall_iret
 ENDPROC(xen_syscall32_target)
 ENDPROC(xen_sysenter_target)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 355fa6b99c9c..f9153a300bce 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -25,6 +25,7 @@ extern struct shared_info *HYPERVISOR_shared_info;
 
 void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
+void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
@@ -41,6 +42,7 @@ void __init xen_build_dynamic_phys_to_machine(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
+void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
 cycle_t xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);