author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:34:37 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:34:37 -0500
commit		eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (patch)
tree		e8b31fdeddd520b0fc56483f0a33c0501ee3b692 /arch
parent		b746f9c7941f227ad582b4f0bc981f3adcbc46b2 (diff)
parent		18c51e1a3fabb455ff1f5cd610097d89f577b8f7 (diff)
Merge tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen updates from Konrad Rzeszutek Wilk:
 "This has tons of fixes and two major features which are concentrated
  around the Xen SWIOTLB library.

  The short <blurb> is that the tracing facility (just one function)
  has been added to SWIOTLB to make it easier to track I/O progress.
  Additionally, under Xen on ARM (32 & 64) the Xen-SWIOTLB driver 'is
  used to translate physical to machine and machine to physical
  addresses of foreign [guest] pages for DMA operations' (Stefano)
  when booting on hardware without a proper IOMMU.

  There are also bug fixes, cleanups, compile warning fixes, etc.

  The commit times for some of the commits are a bit fresh - that is
  because we wanted to make sure we had the Acks from the ARM folks,
  which with the string of back-to-back conferences took a bit of
  time.  Rest assured - the code has been stewing in #linux-next for
  some time.

  Features:
   - SWIOTLB has tracing added when doing bounce buffering.
   - Xen ARM/ARM64 can use Xen-SWIOTLB.  This work allows Linux to
     safely program real devices for DMA operations when running as a
     guest on Xen on ARM, without IOMMU support. [*1]
   - xen_raw_printk works with PVHVM guests if needed.

  Bug fixes:
   - Make memory ballooning work under HVM with a large MMIO region.
   - Inform the hypervisor of MCFG regions found in the ACPI DSDT.
   - Remove deprecated IRQF_DISABLED.
   - Remove deprecated __cpuinit.

  [*1]: 'On arm and arm64 all Xen guests, including dom0, run with
  second stage translation enabled.  As a consequence, when dom0
  programs a device for a DMA operation it is going to use (pseudo)
  physical addresses instead of machine addresses.  This work
  introduces two trees to track physical to machine and machine to
  physical mappings of foreign pages.  Local pages are assumed mapped
  1:1 (physical address == machine address).  It enables the
  SWIOTLB-Xen driver on ARM and ARM64, so that Linux can translate
  physical addresses to machine addresses for DMA operations when
  necessary.' (Stefano)"

* tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (32 commits)
  xen/arm: pfn_to_mfn and mfn_to_pfn return the argument if nothing is in the p2m
  arm,arm64/include/asm/io.h: define struct bio_vec
  swiotlb-xen: missing include dma-direction.h
  pci-swiotlb-xen: call pci_request_acs only ifdef CONFIG_PCI
  arm: make SWIOTLB available
  xen: delete new instances of added __cpuinit
  xen/balloon: Set balloon's initial state to number of existing RAM pages
  xen/mcfg: Call PHYSDEVOP_pci_mmcfg_reserved for MCFG areas.
  xen: remove deprecated IRQF_DISABLED
  x86/xen: remove deprecated IRQF_DISABLED
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  tracing/events: Fix swiotlb tracepoint creation
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  ...
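As a concrete illustration of the translation described in [*1]: a (pseudo) physical address becomes a machine address by swapping the page frame number while keeping the in-page offset, which is what the phys_to_machine() helper in the ARM headers below computes. A minimal stand-alone sketch of that arithmetic; PAGE_SHIFT and the pfn/mfn values are made up for the example, not taken from the patch:

#include <stdio.h>

/* Illustration only: the frame numbers are hypothetical, not from the patch. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long paddr = 0x100abc;			/* guest (pseudo) physical */
	unsigned long offset = paddr & (PAGE_SIZE - 1);	/* 0xabc, carried over as-is */
	unsigned long mfn = 0x2a0;			/* hypothetical p2m lookup result */
	unsigned long maddr = (mfn << PAGE_SHIFT) | offset;

	printf("paddr %#lx -> maddr %#lx\n", paddr, maddr);	/* 0x2a0abc */
	return 0;
}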
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig	|   7
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	|  46
-rw-r--r--	arch/arm/include/asm/io.h	|   9
-rw-r--r--	arch/arm/include/asm/xen/hypervisor.h	|   2
-rw-r--r--	arch/arm/include/asm/xen/page-coherent.h	|  50
-rw-r--r--	arch/arm/include/asm/xen/page.h	|  44
-rw-r--r--	arch/arm/xen/Makefile	|   2
-rw-r--r--	arch/arm/xen/mm.c	|  65
-rw-r--r--	arch/arm/xen/p2m.c	| 208
-rw-r--r--	arch/arm64/Kconfig	|   1
-rw-r--r--	arch/arm64/include/asm/dma-mapping.h	|  14
-rw-r--r--	arch/arm64/include/asm/io.h	|  10
-rw-r--r--	arch/arm64/include/asm/xen/page-coherent.h	|  47
-rw-r--r--	arch/arm64/xen/Makefile	|   2
-rw-r--r--	arch/ia64/include/asm/xen/page-coherent.h	|  38
-rw-r--r--	arch/x86/include/asm/xen/page-coherent.h	|  38
-rw-r--r--	arch/x86/xen/mmu.c	|  15
-rw-r--r--	arch/x86/xen/p2m.c	|   6
-rw-r--r--	arch/x86/xen/pci-swiotlb-xen.c	|   4
-rw-r--r--	arch/x86/xen/setup.c	|   2
-rw-r--r--	arch/x86/xen/smp.c	|  10
-rw-r--r--	arch/x86/xen/spinlock.c	|   2
-rw-r--r--	arch/x86/xen/time.c	|   3
23 files changed, 597 insertions(+), 28 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 00c1ff45a158..e089e622be79 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1862,6 +1862,12 @@ config CC_STACKPROTECTOR
 	  neutralized via a kernel panic.
 	  This feature requires gcc version 4.2 or above.
 
+config SWIOTLB
+	def_bool y
+
+config IOMMU_HELPER
+	def_bool SWIOTLB
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
@@ -1872,6 +1878,7 @@ config XEN
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
 	select ARM_PSCI
+	select SWIOTLB_XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 863cd84eb1a2..e701a4d9aa59 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define DMA_ERROR_CODE	(~0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	return &arm_dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
 	BUG_ON(!dev);
@@ -94,6 +105,39 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	mask = *dev->dma_mask;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
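The dma_capable() helper added above accepts a buffer only if the whole range [addr, addr + size - 1] fits under the device's DMA mask. Here is a stand-alone re-statement of the same check with hypothetical addresses, a sketch rather than kernel code, showing the boundary behavior for a 32-bit mask:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as dma_capable() above, with the struct device
 * plumbing stripped out for illustration. */
static int capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	/* for a 2^n - 1 mask this is 2^n, the size of the addressable window */
	uint64_t limit = (mask + 1) & ~mask;

	if (limit && size > limit)
		return 0;
	return !((addr | (addr + size - 1)) & ~mask);
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask */
	/* last 4 KiB page below 4 GiB: fits */
	printf("%d\n", capable(mask, 0xfffff000ULL, 4096));	/* 1 */
	/* same size, 2 KiB higher: the end crosses the mask, rejected */
	printf("%d\n", capable(mask, 0xfffff800ULL, 4096));	/* 0 */
	return 0;
}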
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..3c597c222ef2 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define BIOVEC_MERGEABLE(vec1, vec2)	\
 	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)			\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&			\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #ifdef CONFIG_MMU
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (__generic_dma_ops(hwdev)->unmap_page)
+		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_device)
+		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..75579a9d6f76 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)			(pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)			(mfn)
 #define mfn_to_virt(m)			(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn	    pte_pfn
@@ -32,6 +32,38 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY	(~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	unsigned long mfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		mfn = __pfn_to_mfn(pfn);
+		if (mfn != INVALID_P2M_ENTRY)
+			return mfn;
+	}
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		pfn = __mfn_to_pfn(mfn);
+		if (pfn != INVALID_P2M_ENTRY)
+			return pfn;
+	}
+
+	return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
 	unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +108,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
 	return 0;
 }
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-	return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+		unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y		:= enlighten.o hypercall.o grant-table.o
+obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				 unsigned int address_bits,
+				 dma_addr_t *dma_handle)
+{
+	if (!xen_initial_domain())
+		return -EINVAL;
+
+	/* we assume that dom0 is mapped 1:1 for now */
+	*dma_handle = pstart;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+	return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+	.mapping_error = xen_swiotlb_dma_mapping_error,
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
+	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+	.map_sg = xen_swiotlb_map_sg_attrs,
+	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_page = xen_swiotlb_map_page,
+	.unmap_page = xen_swiotlb_unmap_page,
+	.dma_supported = xen_swiotlb_dma_supported,
+	.set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+	if (!xen_initial_domain())
+		return 0;
+	xen_swiotlb_init(1, false);
+	xen_dma_ops = &xen_swiotlb_dma_ops;
+	return 0;
+}
+arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..23732cdff551
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/rwlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+	unsigned long pfn;
+	unsigned long mfn;
+	unsigned long nr_pages;
+	struct rb_node rbnode_mach;
+	struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+	struct rb_node **link = &phys_to_mach.rb_node;
+	struct rb_node *parent = NULL;
+	struct xen_p2m_entry *entry;
+	int rc = 0;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+		if (new->mfn == entry->mfn)
+			goto err_out;
+		if (new->pfn == entry->pfn)
+			goto err_out;
+
+		if (new->pfn < entry->pfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+	rb_link_node(&new->rbnode_phys, parent, link);
+	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+	goto out;
+
+err_out:
+	rc = -EINVAL;
+	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+	return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+	struct rb_node *n = phys_to_mach.rb_node;
+	struct xen_p2m_entry *entry;
+	unsigned long irqflags;
+
+	read_lock_irqsave(&p2m_lock, irqflags);
+	while (n) {
+		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+		if (entry->pfn <= pfn &&
+				entry->pfn + entry->nr_pages > pfn) {
+			read_unlock_irqrestore(&p2m_lock, irqflags);
+			return entry->mfn + (pfn - entry->pfn);
+		}
+		if (pfn < entry->pfn)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	read_unlock_irqrestore(&p2m_lock, irqflags);
+
+	return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+	struct rb_node **link = &mach_to_phys.rb_node;
+	struct rb_node *parent = NULL;
+	struct xen_p2m_entry *entry;
+	int rc = 0;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+		if (new->mfn == entry->mfn)
+			goto err_out;
+		if (new->pfn == entry->pfn)
+			goto err_out;
+
+		if (new->mfn < entry->mfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+	rb_link_node(&new->rbnode_mach, parent, link);
+	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+	goto out;
+
+err_out:
+	rc = -EINVAL;
+	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+	return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+	struct rb_node *n = mach_to_phys.rb_node;
+	struct xen_p2m_entry *entry;
+	unsigned long irqflags;
+
+	read_lock_irqsave(&p2m_lock, irqflags);
+	while (n) {
+		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+		if (entry->mfn <= mfn &&
+				entry->mfn + entry->nr_pages > mfn) {
+			read_unlock_irqrestore(&p2m_lock, irqflags);
+			return entry->pfn + (mfn - entry->mfn);
+		}
+		if (mfn < entry->mfn)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	read_unlock_irqrestore(&p2m_lock, irqflags);
+
+	return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+		unsigned long mfn, unsigned long nr_pages)
+{
+	int rc;
+	unsigned long irqflags;
+	struct xen_p2m_entry *p2m_entry;
+	struct rb_node *n = phys_to_mach.rb_node;
+
+	if (mfn == INVALID_P2M_ENTRY) {
+		write_lock_irqsave(&p2m_lock, irqflags);
+		while (n) {
+			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+			if (p2m_entry->pfn <= pfn &&
+					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+				rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+				write_unlock_irqrestore(&p2m_lock, irqflags);
+				kfree(p2m_entry);
+				return true;
+			}
+			if (pfn < p2m_entry->pfn)
+				n = n->rb_left;
+			else
+				n = n->rb_right;
+		}
+		write_unlock_irqrestore(&p2m_lock, irqflags);
+		return true;
+	}
+
+	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+	if (!p2m_entry) {
+		pr_warn("cannot allocate xen_p2m_entry\n");
+		return false;
+	}
+	p2m_entry->pfn = pfn;
+	p2m_entry->nr_pages = nr_pages;
+	p2m_entry->mfn = mfn;
+
+	write_lock_irqsave(&p2m_lock, irqflags);
+	if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
+		(rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+		write_unlock_irqrestore(&p2m_lock, irqflags);
+		return false;
+	}
+	write_unlock_irqrestore(&p2m_lock, irqflags);
+	return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+	rwlock_init(&p2m_lock);
+	return 0;
+}
+arch_initcall(p2m_init);
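One detail worth noting in __pfn_to_mfn() above: entries cover ranges, not single frames, so any pfn inside [pfn, pfn + nr_pages) translates by a constant offset. A userspace model with a single hypothetical entry standing in for the rbtree walk:

#include <stdio.h>

#define INVALID_P2M_ENTRY (~0UL)

/* One made-up range entry in place of the rbtree. */
struct entry {
	unsigned long pfn, mfn, nr_pages;
};

static unsigned long range_lookup(const struct entry *e, unsigned long pfn)
{
	if (e->pfn <= pfn && pfn < e->pfn + e->nr_pages)
		return e->mfn + (pfn - e->pfn);		/* constant offset */
	return INVALID_P2M_ENTRY;
}

int main(void)
{
	struct entry e = { .pfn = 0x1000, .mfn = 0x8000, .nr_pages = 16 };

	printf("%#lx\n", range_lookup(&e, 0x1005));	/* 0x8005 */
	printf("%#lx\n", range_lookup(&e, 0x1010));	/* invalid: outside range */
	return 0;
}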
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9714fe0403b7..88c8b6c1341a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -220,6 +220,7 @@ config XEN_DOM0
 config XEN
 	bool "Xen guest support on ARM64 (EXPERIMENTAL)"
 	depends on ARM64 && OF
+	select SWIOTLB_XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
 
 #include <asm-generic/dma-coherent.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (unlikely(!dev) || !dev->archdata.dma_ops)
 		return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dev->archdata.dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index b56e5b5df881..4cc813eddacb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 
+#include <xen/xen.h>
+
 /*
  * Generic IO read/write.  These perform native-endian accesses.
  */
@@ -263,5 +266,12 @@ extern int devmem_is_allowed(unsigned long pfn);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)			\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&			\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..2820f1a6eebe
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be240404ba96..74a8d87e542b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..96e42f97fa1f
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
+#define _ASM_IA64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	void *vstart = (void*)__get_free_pages(flags, get_order(size));
+	*dma_handle = virt_to_phys(vstart);
+	return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..7f02fe4e2c7b
--- /dev/null
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	void *vstart = (void*)__get_free_pages(flags, get_order(size));
+	*dma_handle = virt_to_phys(vstart);
+	return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 49c962fe7e62..ce563be09cc1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -468,8 +468,8 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
  * 3        PCD PWT   UC       UC     UC
  * 4    PAT           WB       WC     WB
  * 5    PAT     PWT   WC       WP     WT
- * 6    PAT PCD       UC-      UC     UC-
- * 7    PAT PCD PWT   UC       UC     UC
+ * 6    PAT PCD       UC-      rsv    UC-
+ * 7    PAT PCD PWT   UC       rsv    UC
  */
 
 void xen_set_pat(u64 pat)
@@ -2328,12 +2328,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
 	return success;
 }
 
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
-				 unsigned int address_bits)
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				 unsigned int address_bits,
+				 dma_addr_t *dma_handle)
 {
 	unsigned long *in_frames = discontig_frames, out_frame;
 	unsigned long flags;
 	int success;
+	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
 
 	/*
 	 * Currently an auto-translated guest will not perform I/O, nor will
@@ -2368,15 +2370,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
 
 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
 
+	*dma_handle = virt_to_machine(vstart).maddr;
 	return success ? 0 : -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
 	unsigned long *out_frames = discontig_frames, in_frame;
 	unsigned long flags;
 	int success;
+	unsigned long vstart;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
@@ -2384,6 +2388,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return;
 
+	vstart = (unsigned long)phys_to_virt(pstart);
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
 	spin_lock_irqsave(&xen_reservation_lock, flags);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index a61c7d5811be..2ae8699e8767 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -799,10 +799,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
 	unsigned topidx, mididx, idx;
 
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+	/* don't track P2M changes in autotranslate guests */
+	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
 		return true;
-	}
+
 	if (unlikely(pfn >= MAX_P2M_PFN)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 969570491c39..0e98e5d241d0 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -75,8 +75,10 @@ void __init pci_xen_swiotlb_init(void)
 		xen_swiotlb_init(1, true /* early */);
 		dma_ops = &xen_swiotlb_dma_ops;
 
+#ifdef CONFIG_PCI
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
+#endif
 	}
 }
 
@@ -92,8 +94,10 @@ int pci_xen_swiotlb_init_late(void)
 		return rc;
 
 	dma_ops = &xen_swiotlb_dma_ops;
+#ifdef CONFIG_PCI
 	/* Make sure ACS will be enabled */
 	pci_request_acs();
+#endif
 
 	return 0;
 }
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 09f3059cb00b..68c054f59de6 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,7 +556,7 @@ void xen_enable_syscall(void)
 	}
 #endif /* CONFIG_X86_64 */
 }
-void __cpuinit xen_enable_nmi(void)
+void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
 	if (register_callback(CALLBACKTYPE_nmi, nmi))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 31d04758b76f..c36b325abd83 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -149,7 +149,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
 				    cpu,
 				    xen_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    resched_name,
 				    NULL);
 	if (rc < 0)
@@ -161,7 +161,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
 				    cpu,
 				    xen_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
@@ -171,7 +171,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
-				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
+				     IRQF_PERCPU | IRQF_NOBALANCING,
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
@@ -182,7 +182,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
 				    cpu,
 				    xen_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
@@ -201,7 +201,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
 				    cpu,
 				    xen_irq_work_interrupt,
-				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    IRQF_PERCPU|IRQF_NOBALANCING,
 				    callfunc_name,
 				    NULL);
 	if (rc < 0)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index be6b86078957..0e36cde12f7e 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -234,7 +234,7 @@ void xen_init_lock_cpu(int cpu)
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
 				     dummy_handler,
-				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     IRQF_PERCPU|IRQF_NOBALANCING,
 				     name,
 				     NULL);
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index ee365895b06b..12a1ca707b94 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -443,8 +443,7 @@ void xen_setup_timer(int cpu)
 		name = "<timer kasprintf failed>";
 
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-				      IRQF_DISABLED|IRQF_PERCPU|
-				      IRQF_NOBALANCING|IRQF_TIMER|
+				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
 				      IRQF_FORCE_RESUME,
 				      name, NULL);
 