author     Isaku Yamahata <yamahata@valinux.co.jp>    2008-04-02 13:53:55 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-04-24 17:57:32 -0400
commit     e04d0d0767a9c272d3c7300fb7a5221c5e3a71eb (patch)
tree       41316aa05a218fcce5d619c11a57242c4f488d55 /arch/x86/xen
parent     af711cda4f94b5fddcdc5eb4134387ae026e3171 (diff)
xen: move events.c to drivers/xen for IA64/Xen support
Move arch/x86/xen/events.c under drivers/xen so the code can be shared
between x86 and ia64, with a minor adjustment to make it compile.
ia64/xen also uses events.c.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
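
For context, here is a minimal sketch (not part of the commit) of how a Xen
frontend driver consumes the interface that events.c exports, namely
bind_evtchn_to_irqhandler(), notify_remote_via_irq() and
unbind_from_irqhandler(). The driver name, structure and the way the event
channel number is obtained are hypothetical:

    #include <linux/interrupt.h>
    #include <xen/events.h>

    struct demo_front {
            int irq;                /* Linux irq returned by the binding call */
    };

    static irqreturn_t demo_front_interrupt(int irq, void *dev_id)
    {
            /* A real driver would drain its shared ring here. */
            return IRQ_HANDLED;
    }

    /* evtchn is the port the backend advertised, e.g. via xenstore. */
    static int demo_front_connect(struct demo_front *front, unsigned int evtchn)
    {
            int err;

            err = bind_evtchn_to_irqhandler(evtchn, demo_front_interrupt,
                                            0, "demo-front", front);
            if (err < 0)
                    return err;     /* request_irq() failed; already unbound */

            front->irq = err;       /* on success the return value is the irq */

            /* Kick the backend; per the docstring this is safe across save/restore. */
            notify_remote_via_irq(front->irq);
            return 0;
    }

    static void demo_front_disconnect(struct demo_front *front)
    {
            unbind_from_irqhandler(front->irq, front);
    }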
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/Makefile  |   2
-rw-r--r--  arch/x86/xen/events.c  | 658
-rw-r--r--  arch/x86/xen/xen-ops.h |   2
3 files changed, 2 insertions, 660 deletions
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index c5e9aa489900..95c59260dccb 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,4 +1,4 @@
 obj-y := enlighten.o setup.o multicalls.o mmu.o \
-		events.o time.o manage.o xen-asm.o
+		time.o manage.o xen-asm.o
 
 obj-$(CONFIG_SMP) += smp.o
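
Note: this diffstat is limited to arch/x86/xen, so the companion hunk that
picks events.c up in its new location is not shown here. Presumably it adds
the object to the drivers/xen Makefile along the lines of the following
(assumed, not part of the hunks below):

    # drivers/xen/Makefile (assumed counterpart change)
    obj-y += events.o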
diff --git a/arch/x86/xen/events.c b/arch/x86/xen/events.c
deleted file mode 100644
index 85bac298b3cb..000000000000
--- a/arch/x86/xen/events.c
+++ /dev/null
@@ -1,658 +0,0 @@
1 | /* | ||
2 | * Xen event channels | ||
3 | * | ||
4 | * Xen models interrupts with abstract event channels. Because each | ||
5 | * domain gets 1024 event channels, but NR_IRQ is not that large, we | ||
6 | * must dynamically map irqs<->event channels. The event channels | ||
7 | * interface with the rest of the kernel by defining a xen interrupt | ||
8 | * chip. When an event is recieved, it is mapped to an irq and sent | ||
9 | * through the normal interrupt processing path. | ||
10 | * | ||
11 | * There are four kinds of events which can be mapped to an event | ||
12 | * channel: | ||
13 | * | ||
14 | * 1. Inter-domain notifications. This includes all the virtual | ||
15 | * device events, since they're driven by front-ends in another domain | ||
16 | * (typically dom0). | ||
17 | * 2. VIRQs, typically used for timers. These are per-cpu events. | ||
18 | * 3. IPIs. | ||
19 | * 4. Hardware interrupts. Not supported at present. | ||
20 | * | ||
21 | * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 | ||
22 | */ | ||
23 | |||
24 | #include <linux/linkage.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/string.h> | ||
29 | |||
30 | #include <asm/ptrace.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/sync_bitops.h> | ||
33 | #include <asm/xen/hypercall.h> | ||
34 | #include <asm/xen/hypervisor.h> | ||
35 | |||
36 | #include <xen/events.h> | ||
37 | #include <xen/interface/xen.h> | ||
38 | #include <xen/interface/event_channel.h> | ||
39 | |||
40 | #include "xen-ops.h" | ||
41 | |||
42 | /* | ||
43 | * This lock protects updates to the following mapping and reference-count | ||
44 | * arrays. The lock does not need to be acquired to read the mapping tables. | ||
45 | */ | ||
46 | static DEFINE_SPINLOCK(irq_mapping_update_lock); | ||
47 | |||
48 | /* IRQ <-> VIRQ mapping. */ | ||
49 | static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; | ||
50 | |||
51 | /* IRQ <-> IPI mapping */ | ||
52 | static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; | ||
53 | |||
54 | /* Packed IRQ information: binding type, sub-type index, and event channel. */ | ||
55 | struct packed_irq | ||
56 | { | ||
57 | unsigned short evtchn; | ||
58 | unsigned char index; | ||
59 | unsigned char type; | ||
60 | }; | ||
61 | |||
62 | static struct packed_irq irq_info[NR_IRQS]; | ||
63 | |||
64 | /* Binding types. */ | ||
65 | enum { | ||
66 | IRQT_UNBOUND, | ||
67 | IRQT_PIRQ, | ||
68 | IRQT_VIRQ, | ||
69 | IRQT_IPI, | ||
70 | IRQT_EVTCHN | ||
71 | }; | ||
72 | |||
73 | /* Convenient shorthand for packed representation of an unbound IRQ. */ | ||
74 | #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) | ||
75 | |||
76 | static int evtchn_to_irq[NR_EVENT_CHANNELS] = { | ||
77 | [0 ... NR_EVENT_CHANNELS-1] = -1 | ||
78 | }; | ||
79 | static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; | ||
80 | static u8 cpu_evtchn[NR_EVENT_CHANNELS]; | ||
81 | |||
82 | /* Reference counts for bindings to IRQs. */ | ||
83 | static int irq_bindcount[NR_IRQS]; | ||
84 | |||
85 | /* Xen will never allocate port zero for any purpose. */ | ||
86 | #define VALID_EVTCHN(chn) ((chn) != 0) | ||
87 | |||
88 | /* | ||
89 | * Force a proper event-channel callback from Xen after clearing the | ||
90 | * callback mask. We do this in a very simple manner, by making a call | ||
91 | * down into Xen. The pending flag will be checked by Xen on return. | ||
92 | */ | ||
93 | void force_evtchn_callback(void) | ||
94 | { | ||
95 | (void)HYPERVISOR_xen_version(0, NULL); | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(force_evtchn_callback); | ||
98 | |||
99 | static struct irq_chip xen_dynamic_chip; | ||
100 | |||
101 | /* Constructor for packed IRQ information. */ | ||
102 | static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn) | ||
103 | { | ||
104 | return (struct packed_irq) { evtchn, index, type }; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Accessors for packed IRQ information. | ||
109 | */ | ||
110 | static inline unsigned int evtchn_from_irq(int irq) | ||
111 | { | ||
112 | return irq_info[irq].evtchn; | ||
113 | } | ||
114 | |||
115 | static inline unsigned int index_from_irq(int irq) | ||
116 | { | ||
117 | return irq_info[irq].index; | ||
118 | } | ||
119 | |||
120 | static inline unsigned int type_from_irq(int irq) | ||
121 | { | ||
122 | return irq_info[irq].type; | ||
123 | } | ||
124 | |||
125 | static inline unsigned long active_evtchns(unsigned int cpu, | ||
126 | struct shared_info *sh, | ||
127 | unsigned int idx) | ||
128 | { | ||
129 | return (sh->evtchn_pending[idx] & | ||
130 | cpu_evtchn_mask[cpu][idx] & | ||
131 | ~sh->evtchn_mask[idx]); | ||
132 | } | ||
133 | |||
134 | static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | ||
135 | { | ||
136 | int irq = evtchn_to_irq[chn]; | ||
137 | |||
138 | BUG_ON(irq == -1); | ||
139 | #ifdef CONFIG_SMP | ||
140 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | ||
141 | #endif | ||
142 | |||
143 | __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); | ||
144 | __set_bit(chn, cpu_evtchn_mask[cpu]); | ||
145 | |||
146 | cpu_evtchn[chn] = cpu; | ||
147 | } | ||
148 | |||
149 | static void init_evtchn_cpu_bindings(void) | ||
150 | { | ||
151 | #ifdef CONFIG_SMP | ||
152 | int i; | ||
153 | /* By default all event channels notify CPU#0. */ | ||
154 | for (i = 0; i < NR_IRQS; i++) | ||
155 | irq_desc[i].affinity = cpumask_of_cpu(0); | ||
156 | #endif | ||
157 | |||
158 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); | ||
159 | memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); | ||
160 | } | ||
161 | |||
162 | static inline unsigned int cpu_from_evtchn(unsigned int evtchn) | ||
163 | { | ||
164 | return cpu_evtchn[evtchn]; | ||
165 | } | ||
166 | |||
167 | static inline void clear_evtchn(int port) | ||
168 | { | ||
169 | struct shared_info *s = HYPERVISOR_shared_info; | ||
170 | sync_clear_bit(port, &s->evtchn_pending[0]); | ||
171 | } | ||
172 | |||
173 | static inline void set_evtchn(int port) | ||
174 | { | ||
175 | struct shared_info *s = HYPERVISOR_shared_info; | ||
176 | sync_set_bit(port, &s->evtchn_pending[0]); | ||
177 | } | ||
178 | |||
179 | |||
180 | /** | ||
181 | * notify_remote_via_irq - send event to remote end of event channel via irq | ||
182 | * @irq: irq of event channel to send event to | ||
183 | * | ||
184 | * Unlike notify_remote_via_evtchn(), this is safe to use across | ||
185 | * save/restore. Notifications on a broken connection are silently | ||
186 | * dropped. | ||
187 | */ | ||
188 | void notify_remote_via_irq(int irq) | ||
189 | { | ||
190 | int evtchn = evtchn_from_irq(irq); | ||
191 | |||
192 | if (VALID_EVTCHN(evtchn)) | ||
193 | notify_remote_via_evtchn(evtchn); | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(notify_remote_via_irq); | ||
196 | |||
197 | static void mask_evtchn(int port) | ||
198 | { | ||
199 | struct shared_info *s = HYPERVISOR_shared_info; | ||
200 | sync_set_bit(port, &s->evtchn_mask[0]); | ||
201 | } | ||
202 | |||
203 | static void unmask_evtchn(int port) | ||
204 | { | ||
205 | struct shared_info *s = HYPERVISOR_shared_info; | ||
206 | unsigned int cpu = get_cpu(); | ||
207 | |||
208 | BUG_ON(!irqs_disabled()); | ||
209 | |||
210 | /* Slow path (hypercall) if this is a non-local port. */ | ||
211 | if (unlikely(cpu != cpu_from_evtchn(port))) { | ||
212 | struct evtchn_unmask unmask = { .port = port }; | ||
213 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); | ||
214 | } else { | ||
215 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | ||
216 | |||
217 | sync_clear_bit(port, &s->evtchn_mask[0]); | ||
218 | |||
219 | /* | ||
220 | * The following is basically the equivalent of | ||
221 | * 'hw_resend_irq'. Just like a real IO-APIC we 'lose | ||
222 | * the interrupt edge' if the channel is masked. | ||
223 | */ | ||
224 | if (sync_test_bit(port, &s->evtchn_pending[0]) && | ||
225 | !sync_test_and_set_bit(port / BITS_PER_LONG, | ||
226 | &vcpu_info->evtchn_pending_sel)) | ||
227 | vcpu_info->evtchn_upcall_pending = 1; | ||
228 | } | ||
229 | |||
230 | put_cpu(); | ||
231 | } | ||
232 | |||
233 | static int find_unbound_irq(void) | ||
234 | { | ||
235 | int irq; | ||
236 | |||
237 | /* Only allocate from dynirq range */ | ||
238 | for (irq = 0; irq < NR_IRQS; irq++) | ||
239 | if (irq_bindcount[irq] == 0) | ||
240 | break; | ||
241 | |||
242 | if (irq == NR_IRQS) | ||
243 | panic("No available IRQ to bind to: increase NR_IRQS!\n"); | ||
244 | |||
245 | return irq; | ||
246 | } | ||
247 | |||
248 | int bind_evtchn_to_irq(unsigned int evtchn) | ||
249 | { | ||
250 | int irq; | ||
251 | |||
252 | spin_lock(&irq_mapping_update_lock); | ||
253 | |||
254 | irq = evtchn_to_irq[evtchn]; | ||
255 | |||
256 | if (irq == -1) { | ||
257 | irq = find_unbound_irq(); | ||
258 | |||
259 | dynamic_irq_init(irq); | ||
260 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
261 | handle_level_irq, "event"); | ||
262 | |||
263 | evtchn_to_irq[evtchn] = irq; | ||
264 | irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); | ||
265 | } | ||
266 | |||
267 | irq_bindcount[irq]++; | ||
268 | |||
269 | spin_unlock(&irq_mapping_update_lock); | ||
270 | |||
271 | return irq; | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); | ||
274 | |||
275 | static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | ||
276 | { | ||
277 | struct evtchn_bind_ipi bind_ipi; | ||
278 | int evtchn, irq; | ||
279 | |||
280 | spin_lock(&irq_mapping_update_lock); | ||
281 | |||
282 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | ||
283 | if (irq == -1) { | ||
284 | irq = find_unbound_irq(); | ||
285 | if (irq < 0) | ||
286 | goto out; | ||
287 | |||
288 | dynamic_irq_init(irq); | ||
289 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
290 | handle_level_irq, "ipi"); | ||
291 | |||
292 | bind_ipi.vcpu = cpu; | ||
293 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, | ||
294 | &bind_ipi) != 0) | ||
295 | BUG(); | ||
296 | evtchn = bind_ipi.port; | ||
297 | |||
298 | evtchn_to_irq[evtchn] = irq; | ||
299 | irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); | ||
300 | |||
301 | per_cpu(ipi_to_irq, cpu)[ipi] = irq; | ||
302 | |||
303 | bind_evtchn_to_cpu(evtchn, cpu); | ||
304 | } | ||
305 | |||
306 | irq_bindcount[irq]++; | ||
307 | |||
308 | out: | ||
309 | spin_unlock(&irq_mapping_update_lock); | ||
310 | return irq; | ||
311 | } | ||
312 | |||
313 | |||
314 | static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | ||
315 | { | ||
316 | struct evtchn_bind_virq bind_virq; | ||
317 | int evtchn, irq; | ||
318 | |||
319 | spin_lock(&irq_mapping_update_lock); | ||
320 | |||
321 | irq = per_cpu(virq_to_irq, cpu)[virq]; | ||
322 | |||
323 | if (irq == -1) { | ||
324 | bind_virq.virq = virq; | ||
325 | bind_virq.vcpu = cpu; | ||
326 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | ||
327 | &bind_virq) != 0) | ||
328 | BUG(); | ||
329 | evtchn = bind_virq.port; | ||
330 | |||
331 | irq = find_unbound_irq(); | ||
332 | |||
333 | dynamic_irq_init(irq); | ||
334 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | ||
335 | handle_level_irq, "virq"); | ||
336 | |||
337 | evtchn_to_irq[evtchn] = irq; | ||
338 | irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); | ||
339 | |||
340 | per_cpu(virq_to_irq, cpu)[virq] = irq; | ||
341 | |||
342 | bind_evtchn_to_cpu(evtchn, cpu); | ||
343 | } | ||
344 | |||
345 | irq_bindcount[irq]++; | ||
346 | |||
347 | spin_unlock(&irq_mapping_update_lock); | ||
348 | |||
349 | return irq; | ||
350 | } | ||
351 | |||
352 | static void unbind_from_irq(unsigned int irq) | ||
353 | { | ||
354 | struct evtchn_close close; | ||
355 | int evtchn = evtchn_from_irq(irq); | ||
356 | |||
357 | spin_lock(&irq_mapping_update_lock); | ||
358 | |||
359 | if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) { | ||
360 | close.port = evtchn; | ||
361 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | ||
362 | BUG(); | ||
363 | |||
364 | switch (type_from_irq(irq)) { | ||
365 | case IRQT_VIRQ: | ||
366 | per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) | ||
367 | [index_from_irq(irq)] = -1; | ||
368 | break; | ||
369 | default: | ||
370 | break; | ||
371 | } | ||
372 | |||
373 | /* Closed ports are implicitly re-bound to VCPU0. */ | ||
374 | bind_evtchn_to_cpu(evtchn, 0); | ||
375 | |||
376 | evtchn_to_irq[evtchn] = -1; | ||
377 | irq_info[irq] = IRQ_UNBOUND; | ||
378 | |||
379 | dynamic_irq_init(irq); | ||
380 | } | ||
381 | |||
382 | spin_unlock(&irq_mapping_update_lock); | ||
383 | } | ||
384 | |||
385 | int bind_evtchn_to_irqhandler(unsigned int evtchn, | ||
386 | irq_handler_t handler, | ||
387 | unsigned long irqflags, | ||
388 | const char *devname, void *dev_id) | ||
389 | { | ||
390 | unsigned int irq; | ||
391 | int retval; | ||
392 | |||
393 | irq = bind_evtchn_to_irq(evtchn); | ||
394 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
395 | if (retval != 0) { | ||
396 | unbind_from_irq(irq); | ||
397 | return retval; | ||
398 | } | ||
399 | |||
400 | return irq; | ||
401 | } | ||
402 | EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); | ||
403 | |||
404 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, | ||
405 | irq_handler_t handler, | ||
406 | unsigned long irqflags, const char *devname, void *dev_id) | ||
407 | { | ||
408 | unsigned int irq; | ||
409 | int retval; | ||
410 | |||
411 | irq = bind_virq_to_irq(virq, cpu); | ||
412 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
413 | if (retval != 0) { | ||
414 | unbind_from_irq(irq); | ||
415 | return retval; | ||
416 | } | ||
417 | |||
418 | return irq; | ||
419 | } | ||
420 | EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); | ||
421 | |||
422 | int bind_ipi_to_irqhandler(enum ipi_vector ipi, | ||
423 | unsigned int cpu, | ||
424 | irq_handler_t handler, | ||
425 | unsigned long irqflags, | ||
426 | const char *devname, | ||
427 | void *dev_id) | ||
428 | { | ||
429 | int irq, retval; | ||
430 | |||
431 | irq = bind_ipi_to_irq(ipi, cpu); | ||
432 | if (irq < 0) | ||
433 | return irq; | ||
434 | |||
435 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | ||
436 | if (retval != 0) { | ||
437 | unbind_from_irq(irq); | ||
438 | return retval; | ||
439 | } | ||
440 | |||
441 | return irq; | ||
442 | } | ||
443 | |||
444 | void unbind_from_irqhandler(unsigned int irq, void *dev_id) | ||
445 | { | ||
446 | free_irq(irq, dev_id); | ||
447 | unbind_from_irq(irq); | ||
448 | } | ||
449 | EXPORT_SYMBOL_GPL(unbind_from_irqhandler); | ||
450 | |||
451 | void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) | ||
452 | { | ||
453 | int irq = per_cpu(ipi_to_irq, cpu)[vector]; | ||
454 | BUG_ON(irq < 0); | ||
455 | notify_remote_via_irq(irq); | ||
456 | } | ||
457 | |||
458 | irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | ||
459 | { | ||
460 | struct shared_info *sh = HYPERVISOR_shared_info; | ||
461 | int cpu = smp_processor_id(); | ||
462 | int i; | ||
463 | unsigned long flags; | ||
464 | static DEFINE_SPINLOCK(debug_lock); | ||
465 | |||
466 | spin_lock_irqsave(&debug_lock, flags); | ||
467 | |||
468 | printk("vcpu %d\n ", cpu); | ||
469 | |||
470 | for_each_online_cpu(i) { | ||
471 | struct vcpu_info *v = per_cpu(xen_vcpu, i); | ||
472 | printk("%d: masked=%d pending=%d event_sel %08lx\n ", i, | ||
473 | (get_irq_regs() && i == cpu) ? !(get_irq_regs()->flags & X86_EFLAGS_IF) : v->evtchn_upcall_mask, | ||
474 | v->evtchn_upcall_pending, | ||
475 | v->evtchn_pending_sel); | ||
476 | } | ||
477 | printk("pending:\n "); | ||
478 | for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) | ||
479 | printk("%08lx%s", sh->evtchn_pending[i], | ||
480 | i % 8 == 0 ? "\n " : " "); | ||
481 | printk("\nmasks:\n "); | ||
482 | for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | ||
483 | printk("%08lx%s", sh->evtchn_mask[i], | ||
484 | i % 8 == 0 ? "\n " : " "); | ||
485 | |||
486 | printk("\nunmasked:\n "); | ||
487 | for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | ||
488 | printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i], | ||
489 | i % 8 == 0 ? "\n " : " "); | ||
490 | |||
491 | printk("\npending list:\n"); | ||
492 | for(i = 0; i < NR_EVENT_CHANNELS; i++) { | ||
493 | if (sync_test_bit(i, sh->evtchn_pending)) { | ||
494 | printk(" %d: event %d -> irq %d\n", | ||
495 | cpu_evtchn[i], i, | ||
496 | evtchn_to_irq[i]); | ||
497 | } | ||
498 | } | ||
499 | |||
500 | spin_unlock_irqrestore(&debug_lock, flags); | ||
501 | |||
502 | return IRQ_HANDLED; | ||
503 | } | ||
504 | |||
505 | |||
506 | /* | ||
507 | * Search the CPUs pending events bitmasks. For each one found, map | ||
508 | * the event number to an irq, and feed it into do_IRQ() for | ||
509 | * handling. | ||
510 | * | ||
511 | * Xen uses a two-level bitmap to speed searching. The first level is | ||
512 | * a bitset of words which contain pending event bits. The second | ||
513 | * level is a bitset of pending events themselves. | ||
514 | */ | ||
515 | void xen_evtchn_do_upcall(struct pt_regs *regs) | ||
516 | { | ||
517 | int cpu = get_cpu(); | ||
518 | struct shared_info *s = HYPERVISOR_shared_info; | ||
519 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | ||
520 | static DEFINE_PER_CPU(unsigned, nesting_count); | ||
521 | unsigned count; | ||
522 | |||
523 | do { | ||
524 | unsigned long pending_words; | ||
525 | |||
526 | vcpu_info->evtchn_upcall_pending = 0; | ||
527 | |||
528 | if (__get_cpu_var(nesting_count)++) | ||
529 | goto out; | ||
530 | |||
531 | /* NB. No need for a barrier here -- XCHG is a barrier on x86. */ | ||
532 | pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); | ||
533 | while (pending_words != 0) { | ||
534 | unsigned long pending_bits; | ||
535 | int word_idx = __ffs(pending_words); | ||
536 | pending_words &= ~(1UL << word_idx); | ||
537 | |||
538 | while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) { | ||
539 | int bit_idx = __ffs(pending_bits); | ||
540 | int port = (word_idx * BITS_PER_LONG) + bit_idx; | ||
541 | int irq = evtchn_to_irq[port]; | ||
542 | |||
543 | if (irq != -1) { | ||
544 | regs->orig_ax = ~irq; | ||
545 | do_IRQ(regs); | ||
546 | } | ||
547 | } | ||
548 | } | ||
549 | |||
550 | BUG_ON(!irqs_disabled()); | ||
551 | |||
552 | count = __get_cpu_var(nesting_count); | ||
553 | __get_cpu_var(nesting_count) = 0; | ||
554 | } while(count != 1); | ||
555 | |||
556 | out: | ||
557 | put_cpu(); | ||
558 | } | ||
559 | |||
560 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | ||
561 | static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | ||
562 | { | ||
563 | struct evtchn_bind_vcpu bind_vcpu; | ||
564 | int evtchn = evtchn_from_irq(irq); | ||
565 | |||
566 | if (!VALID_EVTCHN(evtchn)) | ||
567 | return; | ||
568 | |||
569 | /* Send future instances of this interrupt to other vcpu. */ | ||
570 | bind_vcpu.port = evtchn; | ||
571 | bind_vcpu.vcpu = tcpu; | ||
572 | |||
573 | /* | ||
574 | * If this fails, it usually just indicates that we're dealing with a | ||
575 | * virq or IPI channel, which don't actually need to be rebound. Ignore | ||
576 | * it, but don't do the xenlinux-level rebind in that case. | ||
577 | */ | ||
578 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | ||
579 | bind_evtchn_to_cpu(evtchn, tcpu); | ||
580 | } | ||
581 | |||
582 | |||
583 | static void set_affinity_irq(unsigned irq, cpumask_t dest) | ||
584 | { | ||
585 | unsigned tcpu = first_cpu(dest); | ||
586 | rebind_irq_to_cpu(irq, tcpu); | ||
587 | } | ||
588 | |||
589 | static void enable_dynirq(unsigned int irq) | ||
590 | { | ||
591 | int evtchn = evtchn_from_irq(irq); | ||
592 | |||
593 | if (VALID_EVTCHN(evtchn)) | ||
594 | unmask_evtchn(evtchn); | ||
595 | } | ||
596 | |||
597 | static void disable_dynirq(unsigned int irq) | ||
598 | { | ||
599 | int evtchn = evtchn_from_irq(irq); | ||
600 | |||
601 | if (VALID_EVTCHN(evtchn)) | ||
602 | mask_evtchn(evtchn); | ||
603 | } | ||
604 | |||
605 | static void ack_dynirq(unsigned int irq) | ||
606 | { | ||
607 | int evtchn = evtchn_from_irq(irq); | ||
608 | |||
609 | move_native_irq(irq); | ||
610 | |||
611 | if (VALID_EVTCHN(evtchn)) | ||
612 | clear_evtchn(evtchn); | ||
613 | } | ||
614 | |||
615 | static int retrigger_dynirq(unsigned int irq) | ||
616 | { | ||
617 | int evtchn = evtchn_from_irq(irq); | ||
618 | struct shared_info *sh = HYPERVISOR_shared_info; | ||
619 | int ret = 0; | ||
620 | |||
621 | if (VALID_EVTCHN(evtchn)) { | ||
622 | int masked; | ||
623 | |||
624 | masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); | ||
625 | sync_set_bit(evtchn, sh->evtchn_pending); | ||
626 | if (!masked) | ||
627 | unmask_evtchn(evtchn); | ||
628 | ret = 1; | ||
629 | } | ||
630 | |||
631 | return ret; | ||
632 | } | ||
633 | |||
634 | static struct irq_chip xen_dynamic_chip __read_mostly = { | ||
635 | .name = "xen-dyn", | ||
636 | .mask = disable_dynirq, | ||
637 | .unmask = enable_dynirq, | ||
638 | .ack = ack_dynirq, | ||
639 | .set_affinity = set_affinity_irq, | ||
640 | .retrigger = retrigger_dynirq, | ||
641 | }; | ||
642 | |||
643 | void __init xen_init_IRQ(void) | ||
644 | { | ||
645 | int i; | ||
646 | |||
647 | init_evtchn_cpu_bindings(); | ||
648 | |||
649 | /* No event channels are 'live' right now. */ | ||
650 | for (i = 0; i < NR_EVENT_CHANNELS; i++) | ||
651 | mask_evtchn(i); | ||
652 | |||
653 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ | ||
654 | for (i = 0; i < NR_IRQS; i++) | ||
655 | irq_bindcount[i] = 0; | ||
656 | |||
657 | irq_ctx_init(smp_processor_id()); | ||
658 | } | ||
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 22395d20dd6e..f1063ae08037 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -3,6 +3,7 @@
 
 #include <linux/init.h>
 #include <linux/irqreturn.h>
+#include <xen/xen-ops.h>
 
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -10,7 +11,6 @@ extern const char xen_failsafe_callback[];
 
 void xen_copy_trap_info(struct trap_info *traps);
 
-DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
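
The removed xen_vcpu declaration does not disappear: with the newly added
#include <xen/xen-ops.h> it is presumably provided by the shared header, so
that the relocated drivers/xen/events.c can see it on both x86 and ia64. A
sketch of what that header would minimally need to declare for events.c; the
actual file is outside this diffstat and its real contents may differ:

    /* include/xen/xen-ops.h -- assumed minimal contents (sketch only) */
    #ifndef INCLUDE_XEN_OPS_H
    #define INCLUDE_XEN_OPS_H

    #include <linux/percpu.h>

    /* Per-cpu pointer into the shared_info vcpu_info array, set up by arch code. */
    DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

    #endif /* INCLUDE_XEN_OPS_H */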