author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-07-28 14:53:57 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-07-31 06:39:39 -0400
commit     0d1edf46ba229b46efacf75c0544b88c05a7b266
tree       a26262e9f4e85ba7150dfa4e06998a37daa6a984
parent     eac4345be6d17541039791f15f173d0426423df1
xen: compile irq functions without -pg for ftrace
For some reason I managed to miss a bunch of irq-related functions
which also need to be compiled without -pg when using ftrace.  This
patch moves them into their own file, and starts a cleanup process
I've been meaning to do anyway.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: "Alex Nixon (Intern)" <Alex.Nixon@eu.citrix.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/xen/Makefile     |   3 +-
 arch/x86/xen/enlighten.c  | 122 +--------------
 arch/x86/xen/irq.c        | 143 ++++++++++++++++
 arch/x86/xen/xen-asm_32.S |   2 +-
 arch/x86/xen/xen-asm_64.S |   2 +-
 arch/x86/xen/xen-ops.h    |   1 +
 drivers/xen/events.c      |  11 -
 7 files changed, 150 insertions(+), 134 deletions(-)
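
Why -pg matters here: gcc's -pg inserts a call to the profiling hook (mcount) at the entry of every function it compiles, and ftrace hooks those call sites. If the tracer's own entry path goes through the paravirtualized IRQ flag operations while those operations are themselves instrumented, entering a traced function re-enters the tracer. The standalone userspace sketch below (hypothetical names throughout, with a depth guard so the demo terminates instead of overflowing the stack) illustrates the recursion the CFLAGS_REMOVE_*.o = -pg lines in the Makefile hunk below prevent:

#include <stdio.h>

static int depth;
static unsigned long traced_save_fl(void);

/* Stands in for the ftrace handler that every -pg call site enters. */
static void profiler_hook(void)
{
	if (depth > 3) {	/* guard so the demo terminates */
		printf("recursed %d deep - a kernel would overflow its stack\n",
		       depth);
		return;
	}
	(void)traced_save_fl();	/* the tracer touches the IRQ pv-op ... */
}

/* ... which stands in for xen_save_fl() compiled *with* -pg. */
static unsigned long traced_save_fl(void)
{
	depth++;
	profiler_hook();	/* the call gcc -pg would insert at entry */
	depth--;
	return 0;
}

int main(void)
{
	traced_save_fl();
	return 0;
}

Moving the real functions into irq.c and dropping -pg for that one object file breaks exactly this cycle.
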
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 5bfee243cf9a..9ee745fa5527 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -2,9 +2,10 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
+CFLAGS_REMOVE_irq.o = -pg
 endif
 
-obj-y		:= enlighten.o setup.o multicalls.o mmu.o \
+obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)	+= smp.o spinlock.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b795470ec069..cf8b3a93122b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -30,7 +30,6 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
-#include <xen/interface/sched.h>
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/hvc-console.h>
@@ -226,94 +225,6 @@ static unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
-static unsigned long xen_save_fl(void)
-{
-	struct vcpu_info *vcpu;
-	unsigned long flags;
-
-	vcpu = x86_read_percpu(xen_vcpu);
-
-	/* flag has opposite sense of mask */
-	flags = !vcpu->evtchn_upcall_mask;
-
-	/* convert to IF type flag
-	   -0 -> 0x00000000
-	   -1 -> 0xffffffff
-	*/
-	return (-flags) & X86_EFLAGS_IF;
-}
-
-static void xen_restore_fl(unsigned long flags)
-{
-	struct vcpu_info *vcpu;
-
-	/* convert from IF type flag */
-	flags = !(flags & X86_EFLAGS_IF);
-
-	/* There's a one instruction preempt window here.  We need to
-	   make sure we're don't switch CPUs between getting the vcpu
-	   pointer and updating the mask. */
-	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
-	vcpu->evtchn_upcall_mask = flags;
-	preempt_enable_no_resched();
-
-	/* Doesn't matter if we get preempted here, because any
-	   pending event will get dealt with anyway. */
-
-	if (flags == 0) {
-		preempt_check_resched();
-		barrier(); /* unmask then check (avoid races) */
-		if (unlikely(vcpu->evtchn_upcall_pending))
-			force_evtchn_callback();
-	}
-}
-
-static void xen_irq_disable(void)
-{
-	/* There's a one instruction preempt window here.  We need to
-	   make sure we're don't switch CPUs between getting the vcpu
-	   pointer and updating the mask. */
-	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
-	preempt_enable_no_resched();
-}
-
-static void xen_irq_enable(void)
-{
-	struct vcpu_info *vcpu;
-
-	/* We don't need to worry about being preempted here, since
-	   either a) interrupts are disabled, so no preemption, or b)
-	   the caller is confused and is trying to re-enable interrupts
-	   on an indeterminate processor. */
-
-	vcpu = x86_read_percpu(xen_vcpu);
-	vcpu->evtchn_upcall_mask = 0;
-
-	/* Doesn't matter if we get preempted here, because any
-	   pending event will get dealt with anyway. */
-
-	barrier(); /* unmask then check (avoid races) */
-	if (unlikely(vcpu->evtchn_upcall_pending))
-		force_evtchn_callback();
-}
-
-static void xen_safe_halt(void)
-{
-	/* Blocking includes an implicit local_irq_enable(). */
-	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
-		BUG();
-}
-
-static void xen_halt(void)
-{
-	if (irqs_disabled())
-		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-	else
-		xen_safe_halt();
-}
-
 static void xen_leave_lazy(void)
 {
 	paravirt_leave_lazy(paravirt_get_lazy_mode());
@@ -1308,36 +1219,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	},
 };
 
-static void __init __xen_init_IRQ(void)
-{
-#ifdef CONFIG_X86_64
-	int i;
-
-	/* Create identity vector->irq map */
-	for(i = 0; i < NR_VECTORS; i++) {
-		int cpu;
-
-		for_each_possible_cpu(cpu)
-			per_cpu(vector_irq, cpu)[i] = i;
-	}
-#endif	/* CONFIG_X86_64 */
-
-	xen_init_IRQ();
-}
-
-static const struct pv_irq_ops xen_irq_ops __initdata = {
-	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
-	.safe_halt = xen_safe_halt,
-	.halt = xen_halt,
-#ifdef CONFIG_X86_64
-	.adjust_exception_frame = xen_adjust_exception_frame,
-#endif
-};
-
 static const struct pv_apic_ops xen_apic_ops __initdata = {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.apic_write = xen_apic_write,
@@ -1740,10 +1621,11 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_init_ops = xen_init_ops;
 	pv_time_ops = xen_time_ops;
 	pv_cpu_ops = xen_cpu_ops;
-	pv_irq_ops = xen_irq_ops;
 	pv_apic_ops = xen_apic_ops;
 	pv_mmu_ops = xen_mmu_ops;
 
+	xen_init_irq_ops();
+
 	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
 		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
 		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
new file mode 100644
index 000000000000..28b85ab8422e
--- /dev/null
+++ b/arch/x86/xen/irq.c
@@ -0,0 +1,143 @@
+#include <linux/hardirq.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/vcpu.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include "xen-ops.h"
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void xen_force_evtchn_callback(void)
+{
+	(void)HYPERVISOR_xen_version(0, NULL);
+}
+
+static void __init __xen_init_IRQ(void)
+{
+#ifdef CONFIG_X86_64
+	int i;
+
+	/* Create identity vector->irq map */
+	for(i = 0; i < NR_VECTORS; i++) {
+		int cpu;
+
+		for_each_possible_cpu(cpu)
+			per_cpu(vector_irq, cpu)[i] = i;
+	}
+#endif	/* CONFIG_X86_64 */
+
+	xen_init_IRQ();
+}
+
+static unsigned long xen_save_fl(void)
+{
+	struct vcpu_info *vcpu;
+	unsigned long flags;
+
+	vcpu = x86_read_percpu(xen_vcpu);
+
+	/* flag has opposite sense of mask */
+	flags = !vcpu->evtchn_upcall_mask;
+
+	/* convert to IF type flag
+	   -0 -> 0x00000000
+	   -1 -> 0xffffffff
+	*/
+	return (-flags) & X86_EFLAGS_IF;
+}
+
+static void xen_restore_fl(unsigned long flags)
+{
+	struct vcpu_info *vcpu;
+
+	/* convert from IF type flag */
+	flags = !(flags & X86_EFLAGS_IF);
+
+	/* There's a one instruction preempt window here.  We need to
+	   make sure we're don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
+	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu->evtchn_upcall_mask = flags;
+	preempt_enable_no_resched();
+
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
+
+	if (flags == 0) {
+		preempt_check_resched();
+		barrier(); /* unmask then check (avoid races) */
+		if (unlikely(vcpu->evtchn_upcall_pending))
+			xen_force_evtchn_callback();
+	}
+}
+
+static void xen_irq_disable(void)
+{
+	/* There's a one instruction preempt window here.  We need to
+	   make sure we're don't switch CPUs between getting the vcpu
+	   pointer and updating the mask. */
+	preempt_disable();
+	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	preempt_enable_no_resched();
+}
+
+static void xen_irq_enable(void)
+{
+	struct vcpu_info *vcpu;
+
+	/* We don't need to worry about being preempted here, since
+	   either a) interrupts are disabled, so no preemption, or b)
+	   the caller is confused and is trying to re-enable interrupts
+	   on an indeterminate processor. */
+
+	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu->evtchn_upcall_mask = 0;
+
+	/* Doesn't matter if we get preempted here, because any
+	   pending event will get dealt with anyway. */
+
+	barrier(); /* unmask then check (avoid races) */
+	if (unlikely(vcpu->evtchn_upcall_pending))
+		xen_force_evtchn_callback();
+}
+
+static void xen_safe_halt(void)
+{
+	/* Blocking includes an implicit local_irq_enable(). */
+	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
+		BUG();
+}
+
+static void xen_halt(void)
+{
+	if (irqs_disabled())
+		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	else
+		xen_safe_halt();
+}
+
+static const struct pv_irq_ops xen_irq_ops __initdata = {
+	.init_IRQ = __xen_init_IRQ,
+	.save_fl = xen_save_fl,
+	.restore_fl = xen_restore_fl,
+	.irq_disable = xen_irq_disable,
+	.irq_enable = xen_irq_enable,
+	.safe_halt = xen_safe_halt,
+	.halt = xen_halt,
+#ifdef CONFIG_X86_64
+	.adjust_exception_frame = xen_adjust_exception_frame,
+#endif
+};
+
+void __init xen_init_irq_ops()
+{
+	pv_irq_ops = xen_irq_ops;
+}
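
The (-flags) & X86_EFLAGS_IF conversion in xen_save_fl() above is a branch-free translation between evtchn_upcall_mask (1 means events masked, i.e. IRQs off) and the EFLAGS.IF bit (0x200, 1 means IRQs on), which have opposite senses: !mask yields 0 or 1, unary minus widens that to 0 or all-ones, and the AND keeps just the IF bit. A minimal userspace sketch of both directions (the helper names are hypothetical; the X86_EFLAGS_IF value is the real one):

#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200UL	/* interrupt-enable flag in EFLAGS */

/* Same conversion as xen_save_fl(): upcall mask -> EFLAGS-style value. */
static unsigned long mask_to_eflags(unsigned char evtchn_upcall_mask)
{
	unsigned long flags = !evtchn_upcall_mask;	/* 0 or 1 */
	return (-flags) & X86_EFLAGS_IF;		/* 0 or 0x200 */
}

/* Inverse conversion, as done at the top of xen_restore_fl(). */
static unsigned char eflags_to_mask(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

int main(void)
{
	printf("mask=1 -> flags=%#lx\n", mask_to_eflags(1));	/* 0     */
	printf("mask=0 -> flags=%#lx\n", mask_to_eflags(0));	/* 0x200 */
	printf("flags=0x200 -> mask=%d\n", eflags_to_mask(X86_EFLAGS_IF));
	return 0;
}
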
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 2497a30f41de..42786f59d9c0 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -298,7 +298,7 @@ check_events:
 	push %eax
 	push %ecx
 	push %edx
-	call force_evtchn_callback
+	call xen_force_evtchn_callback
 	pop %edx
 	pop %ecx
 	pop %eax
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 7f58304fafb3..3b9bda46487a 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -122,7 +122,7 @@ check_events:
 	push %r9
 	push %r10
 	push %r11
-	call force_evtchn_callback
+	call xen_force_evtchn_callback
 	pop %r11
 	pop %r10
 	pop %r9
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 8847fb34f17e..3c70ebc50b1b 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -31,6 +31,7 @@ void xen_vcpu_restore(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
 
+void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_cpu_clockevents(void);
 unsigned long xen_tsc_khz(void);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0e0c28574af8..a0837036d898 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -84,17 +84,6 @@ static int irq_bindcount[NR_IRQS];
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
 
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
-	(void)HYPERVISOR_xen_version(0, NULL);
-}
-EXPORT_SYMBOL_GPL(force_evtchn_callback);
-
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
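
For reference, the pattern the moved xen_force_evtchn_callback() serves is "unmask, barrier, then re-check pending": an event that arrives while evtchn_upcall_mask is set only records evtchn_upcall_pending, so after clearing the mask the code must look at the pending flag again and trigger delivery by hand. Any trap into Xen causes pending events to be re-checked on return, which is why the cheap no-op hypercall HYPERVISOR_xen_version(0, NULL) is enough. A standalone sketch of that ordering (hypothetical names; an ordinary function call stands in for the hypercall):

#include <stdio.h>

/* Toy stand-in for struct vcpu_info's two event-channel fields. */
struct toy_vcpu_info {
	unsigned char evtchn_upcall_mask;	/* 1 = events masked */
	unsigned char evtchn_upcall_pending;	/* 1 = delivery owed */
};

/* Stands in for the trap into Xen: deliver whatever is pending. */
static void deliver_pending(struct toy_vcpu_info *v)
{
	if (v->evtchn_upcall_pending) {
		v->evtchn_upcall_pending = 0;
		printf("event delivered\n");
	}
}

static void toy_irq_enable(struct toy_vcpu_info *v)
{
	v->evtchn_upcall_mask = 0;
	/* compiler barrier: unmask must happen before the re-check */
	__asm__ __volatile__("" ::: "memory");
	if (v->evtchn_upcall_pending)
		deliver_pending(v);	/* the force-callback step */
}

int main(void)
{
	/* An event arrived while masked: only the pending flag is set. */
	struct toy_vcpu_info v = { .evtchn_upcall_mask = 1,
				   .evtchn_upcall_pending = 1 };

	toy_irq_enable(&v);	/* prints "event delivered" */
	return 0;
}
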