path: root/arch/x86/xen
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:12:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 16:12:42 -0400
commit		3e34131a65127e73fbae68c82748f32c8af7e4a4 (patch)
tree		2e404beb5a99b3434e460c45539254149d80a178 /arch/x86/xen
parent		f3acb96f38bb16057e98f862e70e56ca3588ef54 (diff)
parent		0b0c002c340e78173789f8afaa508070d838cf3d (diff)
Merge tag 'stable/for-linus-3.11-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen bugfixes from Konrad Rzeszutek Wilk:

 - Fix memory leak when CPU hotplugging.

 - Compile bugs with various #ifdefs

 - Fix state changes in Xen PCI front not dealing well with new
   toolstack.

 - Cleanups in code (use pr_*, fix 80 characters splits, etc)

 - Long standing bug in double-reporting the steal time

* tag 'stable/for-linus-3.11-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/time: remove blocked time accounting from xen "clockchip"
  xen: Convert printks to pr_<level>
  xen: ifdef CONFIG_HIBERNATE_CALLBACKS xen_*_suspend
  xen/pcifront: Deal with toolstack missing 'XenbusStateClosing' state.
  xen/time: Free onlined per-cpu data structure if we want to online it again.
  xen/time: Check that the per_cpu data structure has data before freeing.
  xen/time: Don't leak interrupt name when offlining.
  xen/time: Encapsulate the struct clock_event_device in another structure.
  xen/spinlock: Don't leak interrupt name when offlining.
  xen/smp: Don't leak interrupt name when offlining.
  xen/smp: Set the per-cpu IRQ number to a valid default.
  xen/smp: Introduce a common structure to contain the IRQ name and interrupt line.
  xen/smp: Coalesce the free_irq calls in one function.
  xen-pciback: fix error return code in pcistub_irq_handler_switch()
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/smp.c	| 91
-rw-r--r--	arch/x86/xen/spinlock.c	|  7
-rw-r--r--	arch/x86/xen/time.c	| 58
3 files changed, 95 insertions, 61 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d99cae8147d1..c1367b29c3b1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -40,11 +40,15 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, xen_resched_irq);
-static DEFINE_PER_CPU(int, xen_callfunc_irq);
-static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(int, xen_irq_work);
-static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
+struct xen_common_irq {
+	int irq;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -99,10 +103,47 @@ static void __cpuinit cpu_bringup_and_idle(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+static void xen_smp_intr_free(unsigned int cpu)
+{
+	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+		per_cpu(xen_resched_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_resched_irq, cpu).name);
+		per_cpu(xen_resched_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+		per_cpu(xen_callfunc_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfunc_irq, cpu).name);
+		per_cpu(xen_callfunc_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+		per_cpu(xen_debug_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_debug_irq, cpu).name);
+		per_cpu(xen_debug_irq, cpu).name = NULL;
+	}
+	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+				       NULL);
+		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+	}
+	if (xen_hvm_domain())
+		return;
+
+	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+		per_cpu(xen_irq_work, cpu).irq = -1;
+		kfree(per_cpu(xen_irq_work, cpu).name);
+		per_cpu(xen_irq_work, cpu).name = NULL;
+	}
+};
 static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
-	const char *resched_name, *callfunc_name, *debug_name;
+	char *resched_name, *callfunc_name, *debug_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -113,7 +154,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_resched_irq, cpu) = rc;
+	per_cpu(xen_resched_irq, cpu).irq = rc;
+	per_cpu(xen_resched_irq, cpu).name = resched_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -124,7 +166,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfunc_irq, cpu) = rc;
+	per_cpu(xen_callfunc_irq, cpu).irq = rc;
+	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -132,7 +175,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_debug_irq, cpu) = rc;
+	per_cpu(xen_debug_irq, cpu).irq = rc;
+	per_cpu(xen_debug_irq, cpu).name = debug_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -143,7 +187,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
 
 	/*
 	 * The IRQ worker on PVHVM goes through the native path and uses the
@@ -161,26 +206,13 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(xen_irq_work, cpu) = rc;
+	per_cpu(xen_irq_work, cpu).irq = rc;
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
 	return 0;
 
  fail:
-	if (per_cpu(xen_resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	if (per_cpu(xen_debug_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
-				       NULL);
-	if (xen_hvm_domain())
-		return rc;
-
-	if (per_cpu(xen_irq_work, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
-
+	xen_smp_intr_free(cpu);
 	return rc;
 }
 
@@ -433,12 +465,7 @@ static void xen_cpu_die(unsigned int cpu)
 		current->state = TASK_UNINTERRUPTIBLE;
 		schedule_timeout(HZ/10);
 	}
-	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-	if (!xen_hvm_domain())
-		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+	xen_smp_intr_free(cpu);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 }
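
The smp.c change above pairs each per-CPU IRQ number with its kasprintf()-allocated name in one struct and funnels all teardown through xen_smp_intr_free(), so the hotplug path and the init failure path both free the name and reset the IRQ to -1. The following is a minimal userspace sketch of that pattern, not kernel code: asprintf()/free() stand in for kasprintf()/kfree(), and fake_bind()/fake_unbind() are illustrative placeholders for bind_ipi_to_irqhandler()/unbind_from_irqhandler().

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

struct common_irq {
	int irq;	/* -1 means "not bound", mirroring the patch's default */
	char *name;	/* heap-allocated, owned by this struct */
};

/* Placeholders for bind_ipi_to_irqhandler()/unbind_from_irqhandler(). */
static int fake_bind(const char *name) { (void)name; return 42; }
static void fake_unbind(int irq) { (void)irq; }

static int intr_init(struct common_irq *ci, int cpu)
{
	int rc;

	if (asprintf(&ci->name, "resched%d", cpu) < 0)
		return -1;
	rc = fake_bind(ci->name);
	if (rc < 0)
		return rc;	/* ci->irq stays -1; intr_free() is still safe */
	ci->irq = rc;		/* record only after a successful bind */
	return 0;
}

static void intr_free(struct common_irq *ci)
{
	/* Idempotent, like xen_smp_intr_free(): a second call sees irq == -1. */
	if (ci->irq >= 0) {
		fake_unbind(ci->irq);
		ci->irq = -1;
		free(ci->name);	/* the leak being fixed: the name was never freed */
		ci->name = NULL;
	}
}

int main(void)
{
	struct common_irq ci = { .irq = -1, .name = NULL };

	if (intr_init(&ci, 0) == 0)
		printf("bound irq %d as '%s'\n", ci.irq, ci.name);
	intr_free(&ci);
	intr_free(&ci);	/* safe: no double-unbind, no double-free */
	return 0;
}

Keeping the IRQ line and the name in one structure is what lets a single free function serve both the error path and CPU offlining, instead of the five hand-rolled unbind calls the patch removes.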
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3002ec1bb71a..a40f8508e760 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/paravirt.h>
 
@@ -165,6 +166,7 @@ static int xen_spin_trylock(struct arch_spinlock *lock)
 	return old == 0;
 }
 
+static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
@@ -362,7 +364,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 void __cpuinit xen_init_lock_cpu(int cpu)
 {
 	int irq;
-	const char *name;
+	char *name;
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
@@ -385,6 +387,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
+		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -401,6 +404,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 }
 
 void __init xen_init_spinlocks(void)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfdf9e1c..a690868be837 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -14,6 +14,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/math64.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
@@ -36,9 +37,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 /* snapshots of runstate info */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
 static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -115,7 +115,7 @@ static void do_stolen_accounting(void)
 {
 	struct vcpu_runstate_info state;
 	struct vcpu_runstate_info *snap;
-	s64 blocked, runnable, offline, stolen;
+	s64 runnable, offline, stolen;
 	cputime_t ticks;
 
 	get_runstate_snapshot(&state);
@@ -125,7 +125,6 @@ static void do_stolen_accounting(void)
 	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing* */
-	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
 	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
 	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
 
@@ -141,17 +140,6 @@ static void do_stolen_accounting(void)
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
-
-	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time. */
-	blocked += __this_cpu_read(xen_residual_blocked);
-
-	if (blocked < 0)
-		blocked = 0;
-
-	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__this_cpu_write(xen_residual_blocked, blocked);
-	account_idle_ticks(ticks);
 }
 
 /* Get the TSC speed from Xen */
@@ -377,11 +365,16 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
 
 static const struct clock_event_device *xen_clockevent =
 	&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
+
+struct xen_clock_event_device {
+	struct clock_event_device evt;
+	char *name;
+};
+static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
 static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
 	irqreturn_t ret;
 
 	ret = IRQ_NONE;
@@ -395,14 +388,30 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 	return ret;
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
+
+	if (evt->irq >= 0) {
+		unbind_from_irqhandler(evt->irq, NULL);
+		evt->irq = -1;
+		kfree(per_cpu(xen_clock_events, cpu).name);
+		per_cpu(xen_clock_events, cpu).name = NULL;
+	}
+}
+
 void xen_setup_timer(int cpu)
 {
-	const char *name;
+	char *name;
 	struct clock_event_device *evt;
 	int irq;
 
-	evt = &per_cpu(xen_clock_events, cpu);
+	evt = &per_cpu(xen_clock_events, cpu).evt;
 	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+	if (evt->irq >= 0)
+		xen_teardown_timer(cpu);
 
 	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
@@ -420,22 +429,15 @@ void xen_setup_timer(int cpu)
 
 	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
+	per_cpu(xen_clock_events, cpu).name = name;
 }
 
-void xen_teardown_timer(int cpu)
-{
-	struct clock_event_device *evt;
-	BUG_ON(cpu == 0);
-	evt = &per_cpu(xen_clock_events, cpu);
-	unbind_from_irqhandler(evt->irq, NULL);
-	evt->irq = -1;
-}
 
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
 
-	clockevents_register_device(&__get_cpu_var(xen_clock_events));
+	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
 }
 
 void xen_timer_resume(void)
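
The steal-time path kept in do_stolen_accounting() converts accumulated nanoseconds into whole scheduler ticks with iter_div_u64_rem() and carries the sub-tick remainder in xen_residual_stolen; the deleted blocked-time block did the same for idle ticks before being removed as part of the double-reporting fix noted in the merge log. Below is a small userspace sketch of that remainder carry, assuming HZ=100 and using plain division in place of iter_div_u64_rem(); it is an illustration, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000ULL / 100)	/* assuming HZ=100: 10 ms per tick */

static uint64_t residual_stolen;	/* per-CPU xen_residual_stolen in the kernel */

static void account_stolen(int64_t stolen_delta_ns)
{
	int64_t stolen = stolen_delta_ns + (int64_t)residual_stolen;
	uint64_t ticks;

	if (stolen < 0)	/* clamp negative deltas, as do_stolen_accounting() does */
		stolen = 0;

	ticks = (uint64_t)stolen / NS_PER_TICK;			/* whole ticks to report */
	residual_stolen = (uint64_t)stolen % NS_PER_TICK;	/* carry the remainder */
	printf("account_steal_ticks(%llu), residual %llu ns\n",
	       (unsigned long long)ticks, (unsigned long long)residual_stolen);
}

int main(void)
{
	account_stolen(25000000);	/* 25 ms -> 2 ticks, 5 ms carried over */
	account_stolen(7000000);	/* 7 ms + 5 ms carry -> 1 tick, 2 ms left */
	return 0;
}

With a 10 ms tick, 25 ms of stolen time reports two ticks and carries 5 ms, which combines with the next 7 ms delta to report one more tick; nothing is lost to rounding across calls.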