author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-05-06 18:58:06 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-05-06 18:58:06 -0400
commit | 0e1dc4274828f64fcb56fc7b950acdc5ff7a395f
tree | 0855a6e189dede21e9e2dd0094774089b1c7d8d2
parent | 3d54ac9e35a69d19381420bb2fa1702d5bf73846
parent | 8746515d7f04c9ea94cf43e2db1fd2cfca93276d
Merge tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:
- fix blkback regression if using persistent grants
- fix various event channel related suspend/resume bugs
- fix AMD x86 regression with X86_BUG_SYSRET_SS_ATTRS
- SWIOTLB on ARM now uses frames <4 GiB (if available) so that devices only
  capable of 32-bit DMA work.
* tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen: Add __GFP_DMA flag when xen_swiotlb_init gets free pages on ARM
hypervisor/x86/xen: Unset X86_BUG_SYSRET_SS_ATTRS on Xen PV guests
xen/events: Set irq_info->evtchn before binding the channel to CPU in __startup_pirq()
xen/console: Update console event channel on resume
xen/xenbus: Update xenbus event channel on resume
xen/events: Clear cpu_evtchn_mask before resuming
xen-pciback: Add name prefix to global 'permissive' variable
xen: Suspend ticks on all CPUs during suspend
xen/grant: introduce func gnttab_unmap_refs_sync()
xen/blkback: safely unmap purge persistent grants
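The blkback and gntdev fixes in this series both replace an open-coded gnttab_unmap_refs_async() call plus completion with the new gnttab_unmap_refs_sync() helper added in drivers/xen/grant-table.c (see the diff below). A minimal caller sketch, assuming hypothetical unmap, pages and segs_to_unmap variables already set up by the caller:

	struct gntab_unmap_queue_data unmap_data;
	int err;

	unmap_data.unmap_ops  = unmap;          /* array of struct gnttab_unmap_grant_ref */
	unmap_data.kunmap_ops = NULL;           /* no kernel-side unmap ops needed here */
	unmap_data.pages      = pages;          /* pages backing the mappings */
	unmap_data.count      = segs_to_unmap;

	/* Queues the async unmap and blocks until the callback fires; 0 on success. */
	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;                     /* blkback instead wraps the call in BUG_ON() */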
-rw-r--r-- | arch/arm/include/asm/xen/page.h | 1
-rw-r--r-- | arch/arm/xen/mm.c | 15
-rw-r--r-- | arch/x86/include/asm/hypervisor.h | 2
-rw-r--r-- | arch/x86/include/asm/xen/page.h | 5
-rw-r--r-- | arch/x86/kernel/cpu/hypervisor.c | 4
-rw-r--r-- | arch/x86/xen/enlighten.c | 27
-rw-r--r-- | arch/x86/xen/suspend.c | 10
-rw-r--r-- | drivers/block/xen-blkback/blkback.c | 35
-rw-r--r-- | drivers/tty/hvc/hvc_xen.c | 18
-rw-r--r-- | drivers/xen/events/events_2l.c | 10
-rw-r--r-- | drivers/xen/events/events_base.c | 7
-rw-r--r-- | drivers/xen/gntdev.c | 28
-rw-r--r-- | drivers/xen/grant-table.c | 28
-rw-r--r-- | drivers/xen/manage.c | 9
-rw-r--r-- | drivers/xen/swiotlb-xen.c | 2
-rw-r--r-- | drivers/xen/xen-pciback/conf_space.c | 6
-rw-r--r-- | drivers/xen/xen-pciback/conf_space.h | 2
-rw-r--r-- | drivers/xen/xen-pciback/conf_space_header.c | 2
-rw-r--r-- | drivers/xen/xenbus/xenbus_probe.c | 29
-rw-r--r-- | include/xen/grant_table.h | 1
-rw-r--r-- | include/xen/xen-ops.h | 1
21 files changed, 168 insertions, 74 deletions
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 2f7e6ff67d51..0b579b2f4e0e 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 793551d15f1d..498325074a06 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -4,6 +4,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/memblock.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -21,6 +22,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+	struct memblock_region *reg;
+	gfp_t flags = __GFP_NOWARN;
+
+	for_each_memblock(memory, reg) {
+		if (reg->base < (phys_addr_t)0xffffffff) {
+			flags |= __GFP_DMA;
+			break;
+		}
+	}
+	return __get_free_pages(flags, order);
+}
+
 enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index e42f758a0fbd..055ea9941dd5 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
 /* Recognized hypervisors */
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 358dcd338915..c44a5d53e464 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
	return false;
 }
 
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+	return __get_free_pages(__GFP_NOWARN, order);
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 36ce402a3fa5..d820d8eae96b 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
 
 static const __initconst struct hypervisor_x86 * const hypervisors[] =
 {
-#ifdef CONFIG_XEN_PVHVM
-	&x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+	&x86_hyper_xen,
 #endif
	&x86_hyper_vmware,
	&x86_hyper_ms_hyperv,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 94578efd3067..46957ead3060 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
 
 static void __init xen_hvm_guest_init(void)
 {
+	if (xen_pv_domain())
+		return;
+
	init_hvm_pv_info();
 
	xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
 }
+#endif
 
 static bool xen_nopv = false;
 static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
 {
	if (xen_nopv)
		return 0;
 
-	if (xen_pv_domain())
-		return 0;
-
	return xen_cpuid_base();
 }
 
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
 }
 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
 
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
-	.name = "Xen HVM",
-	.detect = xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+	if (xen_pv_domain())
+		clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+	.name = "Xen",
+	.detect = xen_platform,
+#ifdef CONFIG_XEN_PVHVM
	.init_platform = xen_hvm_guest_init,
+#endif
	.x2apic_available = xen_x2apic_para_available,
+	.set_cpu_features = xen_set_cpu_features,
 };
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index d9497698645a..53b4c0811f4f 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
	tick_resume_local();
 }
 
+static void xen_vcpu_notify_suspend(void *data)
+{
+	tick_suspend_local();
+}
+
 void xen_arch_resume(void)
 {
	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
 }
+
+void xen_arch_suspend(void)
+{
+	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bbbb22c..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
	atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts_unmap_callback(int result,
-						struct gntab_unmap_queue_data *data)
-{
-	struct completion *c = data->data;
-
-	/* BUG_ON used to reproduce existing behaviour,
-	   but is this the best way to deal with this? */
-	BUG_ON(result);
-	complete(c);
-}
-
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;
-	struct completion unmap_completion;
 
-	init_completion(&unmap_completion);
-
-	unmap_data.data = &unmap_completion;
-	unmap_data.done = &free_persistent_gnts_unmap_callback;
	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
		    !rb_next(&persistent_gnt->node)) {
 
			unmap_data.count = segs_to_unmap;
-			gnttab_unmap_refs_async(&unmap_data);
-			wait_for_completion(&unmap_completion);
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
-	int ret, segs_to_unmap = 0;
+	int segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+	struct gntab_unmap_queue_data unmap_data;
+
+	unmap_data.pages = pages;
+	unmap_data.unmap_ops = unmap;
+	unmap_data.kunmap_ops = NULL;
 
	while(!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
		pages[segs_to_unmap] = persistent_gnt->page;
 
		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-						segs_to_unmap);
-			BUG_ON(ret);
+			unmap_data.count = segs_to_unmap;
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
-		BUG_ON(ret);
+		unmap_data.count = segs_to_unmap;
+		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(blkif, pages, segs_to_unmap);
	}
 }
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f1e57425e39f..5bab1c684bb1 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
	return 0;
 }
 
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+	if (xen_hvm_domain()) {
+		uint64_t v;
+		int err;
+
+		err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+		if (!err && v)
+			info->evtchn = v;
+	} else
+		info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
 void xen_console_resume(void)
 {
	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
-	if (info != NULL && info->irq)
+	if (info != NULL && info->irq) {
+		if (!xen_initial_domain())
+			xen_console_update_evtchn(info);
		rebind_evtchn_irq(info->evtchn, info->irq);
+	}
 }
 
 static void xencons_disconnect_backend(struct xencons_info *info)
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 5db43fc100a4..7dd46312c180 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
	return IRQ_HANDLED;
 }
 
+static void evtchn_2l_resume(void)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
+				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+}
+
 static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels = evtchn_2l_max_channels,
	.nr_channels = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
	.mask = evtchn_2l_mask,
	.unmask = evtchn_2l_unmask,
	.handle_events = evtchn_2l_handle_events,
+	.resume = evtchn_2l_resume,
 };
 
 void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 70fba973a107..2b8553bd8715 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
	if (rc)
		goto err;
 
-	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;
+	bind_evtchn_to_cpu(evtchn, 0);
 
	rc = xen_evtchn_port_setup(info);
	if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
	mutex_unlock(&irq_mapping_update_lock);
 
-	/* new event channels are always bound to cpu 0 */
-	irq_set_affinity(irq, cpumask_of(0));
+	bind_evtchn_to_cpu(evtchn, info->cpu);
+	/* This will be deferred until interrupt is processed */
+	irq_set_affinity(irq, cpumask_of(info->cpu));
 
	/* Unmask the event channel. */
	enable_irq(irq);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index d5bb1a33d0a3..89274850741b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
	return err;
 }
 
-struct unmap_grant_pages_callback_data
-{
-	struct completion completion;
-	int result;
-};
-
-static void unmap_grant_callback(int result,
-				 struct gntab_unmap_queue_data *data)
-{
-	struct unmap_grant_pages_callback_data* d = data->data;
-
-	d->result = result;
-	complete(&d->completion);
-}
-
 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;
-	struct unmap_grant_pages_callback_data data;
-
-	init_completion(&data.completion);
-	unmap_data.data = &data;
-	unmap_data.done= &unmap_grant_callback;
 
	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;
 
-	gnttab_unmap_refs_async(&unmap_data);
-
-	wait_for_completion(&data.completion);
-	if (data.result)
-		return data.result;
+	err = gnttab_unmap_refs_sync(&unmap_data);
+	if (err)
+		return err;
 
	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 17972fbacddc..b1c7170e5c9e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -123,6 +123,11 @@ struct gnttab_ops {
	int (*query_foreign_access)(grant_ref_t ref);
 };
 
+struct unmap_refs_callback_data {
+	struct completion completion;
+	int result;
+};
+
 static struct gnttab_ops *gnttab_interface;
 
 static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
 
+static void unmap_refs_callback(int result,
+		struct gntab_unmap_queue_data *data)
+{
+	struct unmap_refs_callback_data *d = data->data;
+
+	d->result = result;
+	complete(&d->completion);
+}
+
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
+{
+	struct unmap_refs_callback_data data;
+
+	init_completion(&data.completion);
+	item->data = &data;
+	item->done = &unmap_refs_callback;
+	gnttab_unmap_refs_async(item);
+	wait_for_completion(&data.completion);
+
+	return data.result;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
	int rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index bf1940706422..9e6a85104a20 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
		goto out_resume;
	}
 
+	xen_arch_suspend();
+
	si.cancelled = 1;
 
	err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
		si.cancelled = 1;
	}
 
+	xen_arch_resume();
+
 out_resume:
-	if (!si.cancelled) {
-		xen_arch_resume();
+	if (!si.cancelled)
		xs_resume();
-	} else
+	else
		xs_suspend_cancel();
 
	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 810ad419e34c..4c549323c605 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -235,7 +235,7 @@ retry:
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+		xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
		if (xen_io_tlb_start)
			break;
		order--;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 75fe3d466515..9c234209d8b5 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,8 +16,8 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"
 
-bool permissive;
-module_param(permissive, bool, 0644);
+bool xen_pcibk_permissive;
+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
 
 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
  * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
	 * This means that some fields may still be read-only because
	 * they have entries in the config_field list that intercept
	 * the write and do nothing. */
-	if (dev_data->permissive || permissive) {
+	if (dev_data->permissive || xen_pcibk_permissive) {
		switch (size) {
		case 1:
			err = pci_write_config_byte(dev, offset,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 2e1d73d1d5d0..62461a8ba1d6 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,7 +64,7 @@ struct config_field_entry {
	void *data;
 };
 
-extern bool permissive;
+extern bool xen_pcibk_permissive;
 
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
 
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c2260a0456c9..ad3d17d29c81 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 
	cmd->val = value;
 
-	if (!permissive && (!dev_data || !dev_data->permissive))
+	if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
		return 0;
 
	/* Only allow the guest to control certain bits. */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 564b31584860..5390a674b5e3 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -57,6 +57,7 @@
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 
 #include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
	return err;
 }
 
+static int xenbus_resume_cb(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	int err = 0;
+
+	if (xen_hvm_domain()) {
+		uint64_t v;
+
+		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+		if (!err && v)
+			xen_store_evtchn = v;
+		else
+			pr_warn("Cannot update xenstore event channel: %d\n",
+				err);
+	} else
+		xen_store_evtchn = xen_start_info->store_evtchn;
+
+	return err;
+}
+
+static struct notifier_block xenbus_resume_nb = {
+	.notifier_call = xenbus_resume_cb,
+};
+
 static int __init xenbus_init(void)
 {
	int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
		goto out_error;
	}
 
+	if ((xen_store_domain_type != XS_LOCAL) &&
+	    (xen_store_domain_type != XS_UNKNOWN))
+		xen_resume_notifier_register(&xenbus_resume_nb);
+
 #ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 143ca5ffab7a..4478f4b4aae2 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
 
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index c643e6a94c9a..0ce4f32017ea 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
 
 void xen_timer_resume(void);
 void xen_arch_resume(void);
+void xen_arch_suspend(void);
 
 void xen_resume_notifier_register(struct notifier_block *nb);
 void xen_resume_notifier_unregister(struct notifier_block *nb);