author		Sage Weil <sage@inktank.com>	2013-08-15 14:11:45 -0400
committer	Sage Weil <sage@inktank.com>	2013-08-15 14:11:45 -0400
commit		ee3e542fec6e69bc9fb668698889a37d93950ddf (patch)
tree		e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/x86/xen
parent		fe2a801b50c0bb8039d627e5ae1fec249d10ff39 (diff)
parent		f1d6e17f540af37bb1891480143669ba7636c4cf (diff)
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/enlighten.c	8
-rw-r--r--	arch/x86/xen/setup.c	6
-rw-r--r--	arch/x86/xen/smp.c	103
-rw-r--r--	arch/x86/xen/spinlock.c	9
-rw-r--r--	arch/x86/xen/time.c	116
-rw-r--r--	arch/x86/xen/xen-ops.h	2
6 files changed, 153 insertions(+), 91 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a492be2635ac..193097ef3d7d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1557,7 +1557,7 @@ asmlinkage void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_32
 	/* set up basic CPUID stuff */
 	cpu_detect(&new_cpu_data);
-	new_cpu_data.hard_math = 1;
+	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 	new_cpu_data.wp_works_ok = 1;
 	new_cpu_data.x86_capability[0] = cpuid_edx(1);
 #endif
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	int cpu = (long)hcpu;
 	switch (action) {
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
+static struct notifier_block xen_hvm_cpu_notifier = {
 	.notifier_call = xen_hvm_cpu_notify,
 };
 
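
The enlighten.c hunks above drop the retired __cpuinit/__cpuinitdata annotations from Xen's HVM CPU-hotplug notifier. For reference, a minimal sketch of the notifier pattern being touched, assuming the 3.x-era hotplug API (register_cpu_notifier, CPU_UP_PREPARE); the example_* names are hypothetical and not part of this diff:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Callback invoked by the CPU hotplug core; hcpu carries the CPU number. */
static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* per-CPU setup for 'cpu' would go here */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_notify,
};

/* Registered once during init: register_cpu_notifier(&example_cpu_notifier); */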
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..056d11faef21 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void)
 #endif
 }
 
-static int __cpuinit register_callback(unsigned type, const void *func)
+static int register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
 		.type = type,
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func)
 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void __cpuinit xen_enable_sysenter(void)
+void xen_enable_sysenter(void)
 {
 	int ret;
 	unsigned sysenter_feature;
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void)
 	setup_clear_cpu_cap(sysenter_feature);
 }
 
-void __cpuinit xen_enable_syscall(void)
+void xen_enable_syscall(void)
 {
 #ifdef CONFIG_X86_64
 	int ret;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d99cae8147d1..ca92754eb846 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -40,11 +40,15 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, xen_resched_irq);
-static DEFINE_PER_CPU(int, xen_callfunc_irq);
-static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(int, xen_irq_work);
-static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
+struct xen_common_irq {
+	int irq;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -61,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __cpuinit cpu_bringup(void)
+static void cpu_bringup(void)
 {
 	int cpu;
 
@@ -93,16 +97,53 @@ static void __cpuinit cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static void __cpuinit cpu_bringup_and_idle(void)
+static void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+static void xen_smp_intr_free(unsigned int cpu)
+{
+	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+		per_cpu(xen_resched_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_resched_irq, cpu).name);
+		per_cpu(xen_resched_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+		per_cpu(xen_callfunc_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfunc_irq, cpu).name);
+		per_cpu(xen_callfunc_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+		per_cpu(xen_debug_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_debug_irq, cpu).name);
+		per_cpu(xen_debug_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+				       NULL);
+		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+	}
+	if (xen_hvm_domain())
+		return;
+
+	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+		per_cpu(xen_irq_work, cpu).irq = -1;
+		kfree(per_cpu(xen_irq_work, cpu).name);
+		per_cpu(xen_irq_work, cpu).name = NULL;
+	}
+};
 static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
-	const char *resched_name, *callfunc_name, *debug_name;
+	char *resched_name, *callfunc_name, *debug_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -113,7 +154,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_resched_irq, cpu) = rc;
+	per_cpu(xen_resched_irq, cpu).irq = rc;
+	per_cpu(xen_resched_irq, cpu).name = resched_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -124,7 +166,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfunc_irq, cpu) = rc;
+	per_cpu(xen_callfunc_irq, cpu).irq = rc;
+	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -132,7 +175,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_debug_irq, cpu) = rc;
+	per_cpu(xen_debug_irq, cpu).irq = rc;
+	per_cpu(xen_debug_irq, cpu).name = debug_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -143,7 +187,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 
 	/*
 	 * The IRQ worker on PVHVM goes through the native path and uses the
@@ -161,26 +206,13 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_irq_work, cpu) = rc;
+	per_cpu(xen_irq_work, cpu).irq = rc;
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
 	return 0;
 
  fail:
-	if (per_cpu(xen_resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	if (per_cpu(xen_debug_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
-				       NULL);
-	if (xen_hvm_domain())
-		return rc;
-
-	if (per_cpu(xen_irq_work, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
-
+	xen_smp_intr_free(cpu);
 	return rc;
 }
 
@@ -294,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	set_cpu_present(cpu, true);
 }
 
-static int __cpuinit
+static int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -365,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
@@ -433,17 +465,12 @@ static void xen_cpu_die(unsigned int cpu)
 		current->state = TASK_UNINTERRUPTIBLE;
 		schedule_timeout(HZ/10);
 	}
-	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-	if (!xen_hvm_domain())
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+	xen_smp_intr_free(cpu);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
+static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -664,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
+static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
 	rc = native_cpu_up(cpu, tidle);
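
The smp.c changes above replace five bare per-CPU IRQ integers with a struct pairing each IRQ with its kasprintf()-allocated name, so the name strings are no longer leaked and the failure path of xen_smp_intr_init() and xen_cpu_die() can share one teardown helper. A condensed sketch of that pattern, assuming the in-tree unbind_from_irqhandler()/kfree() APIs; the struct and function names here are hypothetical:

#include <linux/slab.h>
#include <xen/events.h>

struct example_irq {
	int irq;	/* -1 while unbound */
	char *name;	/* kasprintf()-allocated, owned by this struct */
};

/* Idempotent teardown: safe to call from both error and hotplug paths. */
static void example_irq_free(struct example_irq *xi)
{
	if (xi->irq >= 0) {
		unbind_from_irqhandler(xi->irq, NULL);
		xi->irq = -1;
		kfree(xi->name);
		xi->name = NULL;
	}
}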
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3002ec1bb71a..cf3caee356b3 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/paravirt.h>
 
@@ -165,6 +166,7 @@ static int xen_spin_trylock(struct arch_spinlock *lock)
 	return old == 0;
 }
 
+static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
@@ -359,10 +361,10 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void __cpuinit xen_init_lock_cpu(int cpu)
+void xen_init_lock_cpu(int cpu)
 {
 	int irq;
-	const char *name;
+	char *name;
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
@@ -385,6 +387,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
+		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -401,6 +404,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 }
 
 void __init xen_init_spinlocks(void)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfdf9e1c..ee365895b06b 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -14,6 +14,8 @@
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/pvclock_gtod.h>
 
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
@@ -36,9 +38,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 /* snapshots of runstate info */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
 static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -115,7 +116,7 @@ static void do_stolen_accounting(void)
 {
 	struct vcpu_runstate_info state;
 	struct vcpu_runstate_info *snap;
-	s64 blocked, runnable, offline, stolen;
+	s64 runnable, offline, stolen;
 	cputime_t ticks;
 
 	get_runstate_snapshot(&state);
@@ -125,7 +126,6 @@ static void do_stolen_accounting(void)
 	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing* */
-	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
 	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
 	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
 
@@ -141,17 +141,6 @@ static void do_stolen_accounting(void)
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
-
-	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time. */
-	blocked += __this_cpu_read(xen_residual_blocked);
-
-	if (blocked < 0)
-		blocked = 0;
-
-	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__this_cpu_write(xen_residual_blocked, blocked);
-	account_idle_ticks(ticks);
 }
 
 /* Get the TSC speed from Xen */
@@ -191,34 +180,56 @@ static void xen_read_wallclock(struct timespec *ts)
 	put_cpu_var(xen_vcpu);
 }
 
-static unsigned long xen_get_wallclock(void)
+static void xen_get_wallclock(struct timespec *now)
 {
-	struct timespec ts;
+	xen_read_wallclock(now);
+}
 
-	xen_read_wallclock(&ts);
-	return ts.tv_sec;
+static int xen_set_wallclock(const struct timespec *now)
+{
+	return -1;
 }
 
-static int xen_set_wallclock(unsigned long now)
+static int xen_pvclock_gtod_notify(struct notifier_block *nb,
+				   unsigned long was_set, void *priv)
 {
+	/* Protected by the calling core code serialization */
+	static struct timespec next_sync;
+
 	struct xen_platform_op op;
-	int rc;
+	struct timespec now;
 
-	/* do nothing for domU */
-	if (!xen_initial_domain())
-		return -1;
+	now = __current_kernel_time();
+
+	/*
+	 * We only take the expensive HV call when the clock was set
+	 * or when the 11 minutes RTC synchronization time elapsed.
+	 */
+	if (!was_set && timespec_compare(&now, &next_sync) < 0)
+		return NOTIFY_OK;
 
 	op.cmd = XENPF_settime;
-	op.u.settime.secs = now;
-	op.u.settime.nsecs = 0;
+	op.u.settime.secs = now.tv_sec;
+	op.u.settime.nsecs = now.tv_nsec;
 	op.u.settime.system_time = xen_clocksource_read();
 
-	rc = HYPERVISOR_dom0_op(&op);
-	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);
+	(void)HYPERVISOR_dom0_op(&op);
+
+	/*
+	 * Move the next drift compensation time 11 minutes
+	 * ahead. That's emulating the sync_cmos_clock() update for
+	 * the hardware RTC.
+	 */
+	next_sync = now;
+	next_sync.tv_sec += 11 * 60;
 
-	return rc;
+	return NOTIFY_OK;
 }
 
+static struct notifier_block xen_pvclock_gtod_notifier = {
+	.notifier_call = xen_pvclock_gtod_notify,
+};
+
 static struct clocksource xen_clocksource __read_mostly = {
 	.name = "xen",
 	.rating = 400,
@@ -377,11 +388,16 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
 
 static const struct clock_event_device *xen_clockevent =
 	&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
+
+struct xen_clock_event_device {
+	struct clock_event_device evt;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
 static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
 	irqreturn_t ret;
 
 	ret = IRQ_NONE;
@@ -395,14 +411,30 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 	return ret;
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
+
+	if (evt->irq >= 0) {
+		unbind_from_irqhandler(evt->irq, NULL);
+		evt->irq = -1;
+		kfree(per_cpu(xen_clock_events, cpu).name);
+		per_cpu(xen_clock_events, cpu).name = NULL;
+	}
+}
+
 void xen_setup_timer(int cpu)
 {
-	const char *name;
+	char *name;
 	struct clock_event_device *evt;
 	int irq;
 
-	evt = &per_cpu(xen_clock_events, cpu);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
 	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+	if (evt->irq >= 0)
+		xen_teardown_timer(cpu);
 
 	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
@@ -420,22 +452,15 @@ void xen_setup_timer(int cpu)
 
 	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
+	per_cpu(xen_clock_events, cpu).name = name;
 }
 
-void xen_teardown_timer(int cpu)
-{
-	struct clock_event_device *evt;
-	BUG_ON(cpu == 0);
-	evt = &per_cpu(xen_clock_events, cpu);
-	unbind_from_irqhandler(evt->irq, NULL);
-	evt->irq = -1;
-}
-
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
 
-	clockevents_register_device(&__get_cpu_var(xen_clock_events));
+	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
 }
 
 void xen_timer_resume(void)
@@ -480,6 +505,9 @@ static void __init xen_time_init(void)
 	xen_setup_runstate_info(cpu);
 	xen_setup_timer(cpu);
 	xen_setup_cpu_clockevents();
+
+	if (xen_initial_domain())
+		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 }
 
 void __init xen_init_time_ops(void)
@@ -492,7 +520,9 @@ void __init xen_init_time_ops(void)
 
 	x86_platform.calibrate_tsc = xen_tsc_khz;
 	x86_platform.get_wallclock = xen_get_wallclock;
-	x86_platform.set_wallclock = xen_set_wallclock;
+	/* Dom0 uses the native method to set the hardware RTC. */
+	if (!xen_initial_domain())
+		x86_platform.set_wallclock = xen_set_wallclock;
 }
 
 #ifdef CONFIG_XEN_PVHVM
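
The new xen_pvclock_gtod_notify() above rate-limits the XENPF_settime hypercall: it fires only when the clock was explicitly set or when the 11-minute RTC synchronization period has elapsed, mirroring the sync_cmos_clock() cadence. A minimal sketch of just that guard, assuming timespec_compare() from <linux/time.h>; example_should_sync and its parameters are hypothetical:

#include <linux/time.h>

/* Returns true when the expensive hypervisor call should be made. */
static bool example_should_sync(const struct timespec *now,
				struct timespec *next_sync,
				unsigned long was_set)
{
	/* Skip when nothing was set and the 11-minute window hasn't elapsed. */
	if (!was_set && timespec_compare(now, next_sync) < 0)
		return false;

	/* Push the next drift-compensation point 11 minutes ahead. */
	*next_sync = *now;
	next_sync->tv_sec += 11 * 60;
	return true;
}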
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..86782c5d7e2a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {}
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init xen_init_spinlocks(void);
-void __cpuinit xen_init_lock_cpu(int cpu);
+void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 #else
 static inline void xen_init_spinlocks(void)