author     Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 16:43:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 16:43:24 -0400
commit     a3da5bf84a97d48cfaf66c6842470fc403da5121 (patch)
tree       cdf66c0cff8c61eedd60601fc9dffdd1ed39b880 /drivers/xen
parent     3b23e665b68387f5ee7b21f7b75ceea4d9acae4a (diff)
parent     d59fdcf2ac501de99c3dfb452af5e254d4342886 (diff)
Merge branch 'x86/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (821 commits)
x86: make 64bit hpet_set_mapping to use ioremap too, v2
x86: get x86_phys_bits early
x86: max_low_pfn_mapped fix #4
x86: change _node_to_cpumask_ptr to return const ptr
x86: I/O APIC: remove an IRQ2-mask hack
x86: fix numaq_tsc_disable calling
x86, e820: remove end_user_pfn
x86: max_low_pfn_mapped fix, #3
x86: max_low_pfn_mapped fix, #2
x86: max_low_pfn_mapped fix, #1
x86_64: fix delayed signals
x86: remove conflicting nx6325 and nx6125 quirks
x86: Recover timer_ack lost in the merge of the NMI watchdog
x86: I/O APIC: Never configure IRQ2
x86: L-APIC: Always fully configure IRQ0
x86: L-APIC: Set IRQ0 as edge-triggered
x86: merge dwarf2 headers
x86: use AS_CFI instead of UNWIND_INFO
x86: use ignore macro instead of hash comment
x86: use matching CFI_ENDPROC
...
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/Makefile               |   2
-rw-r--r--  drivers/xen/balloon.c              |  10
-rw-r--r--  drivers/xen/events.c               | 114
-rw-r--r--  drivers/xen/grant-table.c          |   4
-rw-r--r--  drivers/xen/manage.c               | 252
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c  |  23
6 files changed, 384 insertions, 21 deletions
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 37af04f1ffd9..363286c54290 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y	+= grant-table.o features.o events.o
+obj-y	+= grant-table.o features.o events.o manage.o
 obj-y	+= xenbus/
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab25ba6cbbb9..591bc29b55f5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -225,7 +225,7 @@ static int increase_reservation(unsigned long nr_pages)
 		page = balloon_next_page(page);
 	}
 
-	reservation.extent_start = (unsigned long)frame_list;
+	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
 	rc = HYPERVISOR_memory_op(
 		XENMEM_populate_physmap, &reservation);
@@ -321,7 +321,7 @@ static int decrease_reservation(unsigned long nr_pages)
 		balloon_append(pfn_to_page(pfn));
 	}
 
-	reservation.extent_start = (unsigned long)frame_list;
+	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 	BUG_ON(ret != nr_pages);
@@ -368,7 +368,7 @@ static void balloon_process(struct work_struct *work)
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
-void balloon_set_new_target(unsigned long target)
+static void balloon_set_new_target(unsigned long target)
 {
 	/* No need for lock. Not read-modify-write updates. */
 	balloon_stats.hard_limit = ~0UL;
@@ -483,7 +483,7 @@ static int dealloc_pte_fn(
 		.extent_order = 0,
 		.domid = DOMID_SELF
 	};
-	reservation.extent_start = (unsigned long)&mfn;
+	set_xen_guest_handle(reservation.extent_start, &mfn);
 	set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
 	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -519,7 +519,7 @@ static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
 		.extent_order = 0,
 		.domid = DOMID_SELF
 	};
-	reservation.extent_start = (unsigned long)&gmfn;
+	set_xen_guest_handle(reservation.extent_start, &gmfn);
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
 				   &reservation);
 	if (ret == 1)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 76e5b7386af9..332dd63750a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
+	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -375,7 +375,7 @@ static void unbind_from_irq(unsigned int irq)
 		evtchn_to_irq[evtchn] = -1;
 		irq_info[irq] = IRQ_UNBOUND;
 
-		dynamic_irq_init(irq);
+		dynamic_irq_cleanup(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -557,6 +557,33 @@ out:
 	put_cpu();
 }
 
+/* Rebind a new event channel to an existing irq. */
+void rebind_evtchn_irq(int evtchn, int irq)
+{
+	/* Make sure the irq is masked, since the new event channel
+	   will also be masked. */
+	disable_irq(irq);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	/* After resume the irq<->evtchn mappings are all cleared out */
+	BUG_ON(evtchn_to_irq[evtchn] != -1);
+	/* Expect irq to have been bound before,
+	   so the bindcount should be non-0 */
+	BUG_ON(irq_bindcount[irq] == 0);
+
+	evtchn_to_irq[evtchn] = irq;
+	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	/* new event channels are always bound to cpu 0 */
+	irq_set_affinity(irq, cpumask_of_cpu(0));
+
+	/* Unmask the event channel. */
+	enable_irq(irq);
+}
+
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
@@ -647,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
 	return ret;
 }
 
+static void restore_cpu_virqs(unsigned int cpu)
+{
+	struct evtchn_bind_virq bind_virq;
+	int virq, irq, evtchn;
+
+	for (virq = 0; virq < NR_VIRQS; virq++) {
+		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
+		BUG_ON(irq_info[irq].index != virq);
+
+		/* Get a new binding from Xen. */
+		bind_virq.virq = virq;
+		bind_virq.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+						&bind_virq) != 0)
+			BUG();
+		evtchn = bind_virq.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+	}
+}
+
+static void restore_cpu_ipis(unsigned int cpu)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int ipi, irq, evtchn;
+
+	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_IPI);
+		BUG_ON(irq_info[irq].index != ipi);
+
+		/* Get a new binding from Xen. */
+		bind_ipi.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+						&bind_ipi) != 0)
+			BUG();
+		evtchn = bind_ipi.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+
+	}
+}
+
+void xen_irq_resume(void)
+{
+	unsigned int cpu, irq, evtchn;
+
+	init_evtchn_cpu_bindings();
+
+	/* New event-channel space is not 'live' yet. */
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		mask_evtchn(evtchn);
+
+	/* No IRQ <-> event-channel mappings. */
+	for (irq = 0; irq < NR_IRQS; irq++)
+		irq_info[irq].evtchn = 0; /* zap event-channel binding */
+
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		evtchn_to_irq[evtchn] = -1;
+
+	for_each_possible_cpu(cpu) {
+		restore_cpu_virqs(cpu);
+		restore_cpu_ipis(cpu);
+	}
+}
+
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name = "xen-dyn",
 	.mask = disable_dynirq,
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 52b6b41b909d..e9e11168616a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -471,14 +471,14 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 	return 0;
 }
 
-static int gnttab_resume(void)
+int gnttab_resume(void)
 {
 	if (max_nr_grant_frames() < nr_grant_frames)
 		return -ENOSYS;
 	return gnttab_map(0, nr_grant_frames - 1);
 }
 
-static int gnttab_suspend(void)
+int gnttab_suspend(void)
 {
 	arch_gnttab_unmap_shared(shared, nr_grant_frames);
 	return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
new file mode 100644
index 000000000000..5b546e365f00
--- /dev/null
+++ b/drivers/xen/manage.c
@@ -0,0 +1,252 @@
+/*
+ * Handle extern requests for shutdown, reboot and sysrq
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+#include <linux/stop_machine.h>
+#include <linux/freezer.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/hvc-console.h>
+#include <xen/xen-ops.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+enum shutdown_state {
+	SHUTDOWN_INVALID = -1,
+	SHUTDOWN_POWEROFF = 0,
+	SHUTDOWN_SUSPEND = 2,
+	/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
+	   report a crash, not be instructed to crash!
+	   HALT is the same as POWEROFF, as far as we're concerned.  The tools use
+	   the distinction when we return the reason code to them.  */
+	SHUTDOWN_HALT = 4,
+};
+
+/* Ignore multiple shutdown requests. */
+static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
+
+#ifdef CONFIG_PM_SLEEP
+static int xen_suspend(void *data)
+{
+	int *cancelled = data;
+	int err;
+
+	BUG_ON(!irqs_disabled());
+
+	load_cr3(swapper_pg_dir);
+
+	err = device_power_down(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
+		       err);
+		return err;
+	}
+
+	xen_mm_pin_all();
+	gnttab_suspend();
+	xen_pre_suspend();
+
+	/*
+	 * This hypercall returns 1 if suspend was cancelled
+	 * or the domain was merely checkpointed, and 0 if it
+	 * is resuming in a new domain.
+	 */
+	*cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+
+	xen_post_suspend(*cancelled);
+	gnttab_resume();
+	xen_mm_unpin_all();
+
+	device_power_up();
+
+	if (!*cancelled) {
+		xen_irq_resume();
+		xen_console_resume();
+	}
+
+	return 0;
+}
+
+static void do_suspend(void)
+{
+	int err;
+	int cancelled = 1;
+
+	shutting_down = SHUTDOWN_SUSPEND;
+
+#ifdef CONFIG_PREEMPT
+	/* If the kernel is preemptible, we need to freeze all the processes
+	   to prevent them from being in the middle of a pagetable update
+	   during suspend. */
+	err = freeze_processes();
+	if (err) {
+		printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
+		return;
+	}
+#endif
+
+	err = device_suspend(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen suspend: device_suspend %d\n", err);
+		goto out;
+	}
+
+	printk("suspending xenbus...\n");
+	/* XXX use normal device tree? */
+	xenbus_suspend();
+
+	err = stop_machine_run(xen_suspend, &cancelled, 0);
+	if (err) {
+		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
+		goto out;
+	}
+
+	if (!cancelled)
+		xenbus_resume();
+	else
+		xenbus_suspend_cancel();
+
+	device_resume();
+
+	/* Make sure timer events get retriggered on all CPUs */
+	clock_was_set();
+out:
+#ifdef CONFIG_PREEMPT
+	thaw_processes();
+#endif
+	shutting_down = SHUTDOWN_INVALID;
+}
+#endif	/* CONFIG_PM_SLEEP */
+
+static void shutdown_handler(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	char *str;
+	struct xenbus_transaction xbt;
+	int err;
+
+	if (shutting_down != SHUTDOWN_INVALID)
+		return;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+
+	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
+	/* Ignore read errors and empty reads. */
+	if (XENBUS_IS_ERR_READ(str)) {
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	xenbus_write(xbt, "control", "shutdown", "");
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN) {
+		kfree(str);
+		goto again;
+	}
+
+	if (strcmp(str, "poweroff") == 0 ||
+	    strcmp(str, "halt") == 0) {
+		shutting_down = SHUTDOWN_POWEROFF;
+		orderly_poweroff(false);
+	} else if (strcmp(str, "reboot") == 0) {
+		shutting_down = SHUTDOWN_POWEROFF; /* ? */
+		ctrl_alt_del();
+#ifdef CONFIG_PM_SLEEP
+	} else if (strcmp(str, "suspend") == 0) {
+		do_suspend();
+#endif
+	} else {
+		printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
+		shutting_down = SHUTDOWN_INVALID;
+	}
+
+	kfree(str);
+}
+
+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+			  unsigned int len)
+{
+	char sysrq_key = '\0';
+	struct xenbus_transaction xbt;
+	int err;
+
+ again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		return;
+	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+		printk(KERN_ERR "Unable to read sysrq code in "
+		       "control/sysrq\n");
+		xenbus_transaction_end(xbt, 1);
+		return;
+	}
+
+	if (sysrq_key != '\0')
+		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN)
+		goto again;
+
+	if (sysrq_key != '\0')
+		handle_sysrq(sysrq_key, NULL);
+}
+
+static struct xenbus_watch shutdown_watch = {
+	.node = "control/shutdown",
+	.callback = shutdown_handler
+};
+
+static struct xenbus_watch sysrq_watch = {
+	.node = "control/sysrq",
+	.callback = sysrq_handler
+};
+
+static int setup_shutdown_watcher(void)
+{
+	int err;
+
+	err = register_xenbus_watch(&shutdown_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set shutdown watcher\n");
+		return err;
+	}
+
+	err = register_xenbus_watch(&sysrq_watch);
+	if (err) {
+		printk(KERN_ERR "Failed to set sysrq watcher\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int shutdown_event(struct notifier_block *notifier,
+			  unsigned long event,
+			  void *data)
+{
+	setup_shutdown_watcher();
+	return NOTIFY_DONE;
+}
+
+static int __init setup_shutdown_event(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = shutdown_event
+	};
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+
+subsys_initcall(setup_shutdown_event);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 6efbe3f29ca5..090c61ee8fd0 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -203,7 +203,6 @@ int xb_read(void *data, unsigned len)
 int xb_init_comms(void)
 {
 	struct xenstore_domain_interface *intf = xen_store_interface;
-	int err;
 
 	if (intf->req_prod != intf->req_cons)
 		printk(KERN_ERR "XENBUS request ring is not quiescent "
@@ -216,18 +215,20 @@ int xb_init_comms(void)
 		intf->rsp_cons = intf->rsp_prod;
 	}
 
-	if (xenbus_irq)
-		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+	if (xenbus_irq) {
+		/* Already have an irq; assume we're resuming */
+		rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
+	} else {
+		int err;
+		err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
+						0, "xenbus", &xb_waitq);
+		if (err <= 0) {
+			printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+			return err;
+		}
 
-	err = bind_evtchn_to_irqhandler(
-		xen_store_evtchn, wake_waiting,
-		0, "xenbus", &xb_waitq);
-	if (err <= 0) {
-		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-		return err;
+		xenbus_irq = err;
 	}
 
-	xenbus_irq = err;
-
 	return 0;
 }