author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 15:36:09 -0500
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 16:10:48 -0500
commit    e1d8f62ad49a6a7068aa1bdc30252911d71c4dc4
tree      e7ad9bf58ba9b58bf48ff59283ba0c27b03969d1
parent    bad97817dece759dd6c0b24f862b7d0ed588edda
parent    15177608c703e7b4aa29aa7c93b31001effe504c

Merge remote-tracking branch 'stefano/swiotlb-xen-9.1' into stable/for-linus-3.13
* stefano/swiotlb-xen-9.1:
swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
grant-table: call set_phys_to_machine after mapping grant refs
arm,arm64: do not always merge biovec if we are running on Xen
swiotlb: print a warning when the swiotlb is full
swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
swiotlb-xen: use xen_alloc/free_coherent_pages
xen: introduce xen_alloc/free_coherent_pages
arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
swiotlb-xen: introduce xen_swiotlb_set_dma_mask
xen/arm,arm64: enable SWIOTLB_XEN
xen: make xen_create_contiguous_region return the dma address
xen/x86: allow __set_phys_to_machine for autotranslate guests
arm/xen,arm64/xen: introduce p2m
arm64: define DMA_ERROR_CODE
arm: make SWIOTLB available
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Conflicts:
arch/arm/include/asm/dma-mapping.h
drivers/xen/swiotlb-xen.c
[Conflicts arose because "arm: make SWIOTLB available" v8 was in Stefano's
branch, while I had v9 + Ack from Russell. I also fixed up whitespace
issues]
Diffstat:
 arch/arm/Kconfig                           |   1
 arch/arm/include/asm/dma-mapping.h         |  13
 arch/arm/include/asm/io.h                  |   8
 arch/arm/include/asm/xen/hypervisor.h      |   2
 arch/arm/include/asm/xen/page-coherent.h   |  50
 arch/arm/include/asm/xen/page.h            |  50
 arch/arm/xen/Makefile                      |   2
 arch/arm/xen/mm.c                          |  65
 arch/arm/xen/p2m.c                         | 208
 arch/arm64/Kconfig                         |   1
 arch/arm64/include/asm/dma-mapping.h       |  14
 arch/arm64/include/asm/io.h                |   9
 arch/arm64/include/asm/xen/page-coherent.h |  47
 arch/arm64/xen/Makefile                    |   2
 arch/ia64/include/asm/xen/page-coherent.h  |  38
 arch/x86/include/asm/xen/page-coherent.h   |  38
 arch/x86/xen/mmu.c                         |  11
 arch/x86/xen/p2m.c                         |   6
 drivers/xen/Kconfig                        |   1
 drivers/xen/grant-table.c                  |  19
 drivers/xen/swiotlb-xen.c                  | 115
 include/xen/swiotlb-xen.h                  |   2
 include/xen/xen-ops.h                      |   7
 lib/swiotlb.c                              |   1
 24 files changed, 661 insertions(+), 49 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b08374f8fe3b..01f7013c85c7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1888,6 +1888,7 @@ config XEN
         depends on CPU_V7 && !CPU_V6
         depends on !GENERIC_ATOMIC64
         select ARM_PSCI
+        select SWIOTLB_XEN
         help
           Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1ad2c171054b..8acfef48124a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define DMA_ERROR_CODE  (~0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
         if (dev && dev->archdata.dma_ops)
                 return dev->archdata.dma_ops;
         return &arm_dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+        if (xen_initial_domain())
+                return xen_dma_ops;
+        else
+                return __generic_dma_ops(dev);
+}
+
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
         BUG_ON(!dev);
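
For context: the hunk above splits the old per-device lookup into __generic_dma_ops() and layers a Xen check on top, so that in the initial domain every device is steered to xen_dma_ops (populated later in this series by arch/arm/xen/mm.c). A minimal user-space sketch of just that dispatch, with stub types standing in for the kernel's struct device and dma_map_ops (the stubs are illustrative, not kernel API):

    #include <stdio.h>

    /* Toy stand-ins for the two ops tables and the new two-level dispatch. */
    struct dma_map_ops { const char *name; };

    static struct dma_map_ops arm_dma_ops = { "arm_dma_ops" };
    static struct dma_map_ops xen_swiotlb_ops = { "xen_dma_ops" };
    static struct dma_map_ops *xen_dma_ops = &xen_swiotlb_ops;

    struct device { struct dma_map_ops *dma_ops; };

    static int xen_initial_domain(void) { return 1; /* pretend we are dom0 */ }

    /* Per-device override if present, else the architecture default. */
    static struct dma_map_ops *__generic_dma_ops(struct device *dev)
    {
        if (dev && dev->dma_ops)
            return dev->dma_ops;
        return &arm_dma_ops;
    }

    /* In dom0 every device goes through the Xen swiotlb ops. */
    static struct dma_map_ops *get_dma_ops(struct device *dev)
    {
        if (xen_initial_domain())
            return xen_dma_ops;
        return __generic_dma_ops(dev);
    }

    int main(void)
    {
        struct device d = { 0 };
        printf("%s\n", get_dma_ops(&d)->name); /* prints xen_dma_ops in dom0 */
        return 0;
    }
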
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..c45effc18312 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define BIOVEC_MERGEABLE(vec1, vec2)    \
         ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+                                      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)                               \
+        (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&                         \
+         (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #ifdef CONFIG_MMU
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
         return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        if (__generic_dma_ops(hwdev)->unmap_page)
+                __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+                __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_device)
+                __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..71bb779f2761 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)                 (pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)                 (mfn)
 #define mfn_to_virt(m)                  (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn         pte_pfn
@@ -32,6 +32,44 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY       (~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+        unsigned long mfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                mfn = __pfn_to_mfn(pfn);
+                if (mfn != INVALID_P2M_ENTRY)
+                        return mfn;
+        }
+
+        if (xen_initial_domain())
+                return pfn;
+        else
+                return INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+        unsigned long pfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                pfn = __mfn_to_pfn(mfn);
+                if (pfn != INVALID_P2M_ENTRY)
+                        return pfn;
+        }
+
+        if (xen_initial_domain())
+                return mfn;
+        else
+                return INVALID_P2M_ENTRY;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
         unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +114,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
         return 0;
 }
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-        BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-        return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+                unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
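
The new pfn_to_mfn()/mfn_to_pfn() above consult the p2m tree first and only then fall back to the old behaviour: dom0, assumed mapped 1:1, answers identity, while a domU answers INVALID_P2M_ENTRY. A small sketch of just that fallback policy (the two knobs below are illustrative stand-ins for kernel state, not real API):

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)

    /* Stand-ins: whether the rbtree has entries, and whether we are dom0. */
    static int p2m_has_entries = 0;
    static int initial_domain = 1;

    /* Mirrors the header's policy: tree first; with no entry, dom0 answers
     * identity (1:1 mapping) and a domU answers invalid. */
    static unsigned long pfn_to_mfn(unsigned long pfn)
    {
        if (p2m_has_entries) {
            /* the real code asks __pfn_to_mfn(pfn) here and returns its
             * answer unless it is INVALID_P2M_ENTRY */
        }
        return initial_domain ? pfn : INVALID_P2M_ENTRY;
    }

    int main(void)
    {
        printf("dom0: pfn 0x1234 -> mfn 0x%lx\n", pfn_to_mfn(0x1234));
        initial_domain = 0;
        printf("domU: pfn 0x1234 -> mfn 0x%lx\n", pfn_to_mfn(0x1234));
        return 0;
    }
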
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+                                 unsigned int address_bits,
+                                 dma_addr_t *dma_handle)
+{
+        if (!xen_initial_domain())
+                return -EINVAL;
+
+        /* we assume that dom0 is mapped 1:1 for now */
+        *dma_handle = pstart;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+        return;
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+struct dma_map_ops *xen_dma_ops;
+EXPORT_SYMBOL_GPL(xen_dma_ops);
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+        .mapping_error = xen_swiotlb_dma_mapping_error,
+        .alloc = xen_swiotlb_alloc_coherent,
+        .free = xen_swiotlb_free_coherent,
+        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+        .map_sg = xen_swiotlb_map_sg_attrs,
+        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+        .map_page = xen_swiotlb_map_page,
+        .unmap_page = xen_swiotlb_unmap_page,
+        .dma_supported = xen_swiotlb_dma_supported,
+        .set_dma_mask = xen_swiotlb_set_dma_mask,
+};
+
+int __init xen_mm_init(void)
+{
+        if (!xen_initial_domain())
+                return 0;
+        xen_swiotlb_init(1, false);
+        xen_dma_ops = &xen_swiotlb_dma_ops;
+        return 0;
+}
+arch_initcall(xen_mm_init);
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..23732cdff551
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/rwlock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/swiotlb.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/swiotlb-xen.h>
+
+#include <asm/cacheflush.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
+
+struct xen_p2m_entry {
+        unsigned long pfn;
+        unsigned long mfn;
+        unsigned long nr_pages;
+        struct rb_node rbnode_mach;
+        struct rb_node rbnode_phys;
+};
+
+rwlock_t p2m_lock;
+struct rb_root phys_to_mach = RB_ROOT;
+static struct rb_root mach_to_phys = RB_ROOT;
+
+static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
+{
+        struct rb_node **link = &phys_to_mach.rb_node;
+        struct rb_node *parent = NULL;
+        struct xen_p2m_entry *entry;
+        int rc = 0;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
+
+                if (new->mfn == entry->mfn)
+                        goto err_out;
+                if (new->pfn == entry->pfn)
+                        goto err_out;
+
+                if (new->pfn < entry->pfn)
+                        link = &(*link)->rb_left;
+                else
+                        link = &(*link)->rb_right;
+        }
+        rb_link_node(&new->rbnode_phys, parent, link);
+        rb_insert_color(&new->rbnode_phys, &phys_to_mach);
+        goto out;
+
+err_out:
+        rc = -EINVAL;
+        pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+                        __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+        return rc;
+}
+
+unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+        struct rb_node *n = phys_to_mach.rb_node;
+        struct xen_p2m_entry *entry;
+        unsigned long irqflags;
+
+        read_lock_irqsave(&p2m_lock, irqflags);
+        while (n) {
+                entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+                if (entry->pfn <= pfn &&
+                                entry->pfn + entry->nr_pages > pfn) {
+                        read_unlock_irqrestore(&p2m_lock, irqflags);
+                        return entry->mfn + (pfn - entry->pfn);
+                }
+                if (pfn < entry->pfn)
+                        n = n->rb_left;
+                else
+                        n = n->rb_right;
+        }
+        read_unlock_irqrestore(&p2m_lock, irqflags);
+
+        return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__pfn_to_mfn);
+
+static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
+{
+        struct rb_node **link = &mach_to_phys.rb_node;
+        struct rb_node *parent = NULL;
+        struct xen_p2m_entry *entry;
+        int rc = 0;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
+
+                if (new->mfn == entry->mfn)
+                        goto err_out;
+                if (new->pfn == entry->pfn)
+                        goto err_out;
+
+                if (new->mfn < entry->mfn)
+                        link = &(*link)->rb_left;
+                else
+                        link = &(*link)->rb_right;
+        }
+        rb_link_node(&new->rbnode_mach, parent, link);
+        rb_insert_color(&new->rbnode_mach, &mach_to_phys);
+        goto out;
+
+err_out:
+        rc = -EINVAL;
+        pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
+                        __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
+out:
+        return rc;
+}
+
+unsigned long __mfn_to_pfn(unsigned long mfn)
+{
+        struct rb_node *n = mach_to_phys.rb_node;
+        struct xen_p2m_entry *entry;
+        unsigned long irqflags;
+
+        read_lock_irqsave(&p2m_lock, irqflags);
+        while (n) {
+                entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
+                if (entry->mfn <= mfn &&
+                                entry->mfn + entry->nr_pages > mfn) {
+                        read_unlock_irqrestore(&p2m_lock, irqflags);
+                        return entry->pfn + (mfn - entry->mfn);
+                }
+                if (mfn < entry->mfn)
+                        n = n->rb_left;
+                else
+                        n = n->rb_right;
+        }
+        read_unlock_irqrestore(&p2m_lock, irqflags);
+
+        return INVALID_P2M_ENTRY;
+}
+EXPORT_SYMBOL_GPL(__mfn_to_pfn);
+
+bool __set_phys_to_machine_multi(unsigned long pfn,
+                unsigned long mfn, unsigned long nr_pages)
+{
+        int rc;
+        unsigned long irqflags;
+        struct xen_p2m_entry *p2m_entry;
+        struct rb_node *n = phys_to_mach.rb_node;
+
+        if (mfn == INVALID_P2M_ENTRY) {
+                write_lock_irqsave(&p2m_lock, irqflags);
+                while (n) {
+                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+                        if (p2m_entry->pfn <= pfn &&
+                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
+                                rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
+                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
+                                write_unlock_irqrestore(&p2m_lock, irqflags);
+                                kfree(p2m_entry);
+                                return true;
+                        }
+                        if (pfn < p2m_entry->pfn)
+                                n = n->rb_left;
+                        else
+                                n = n->rb_right;
+                }
+                write_unlock_irqrestore(&p2m_lock, irqflags);
+                return true;
+        }
+
+        p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
+        if (!p2m_entry) {
+                pr_warn("cannot allocate xen_p2m_entry\n");
+                return false;
+        }
+        p2m_entry->pfn = pfn;
+        p2m_entry->nr_pages = nr_pages;
+        p2m_entry->mfn = mfn;
+
+        write_lock_irqsave(&p2m_lock, irqflags);
+        if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
+                (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+                write_unlock_irqrestore(&p2m_lock, irqflags);
+                return false;
+        }
+        write_unlock_irqrestore(&p2m_lock, irqflags);
+        return true;
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+        return __set_phys_to_machine_multi(pfn, mfn, 1);
+}
+EXPORT_SYMBOL_GPL(__set_phys_to_machine);
+
+int p2m_init(void)
+{
+        rwlock_init(&p2m_lock);
+        return 0;
+}
+arch_initcall(p2m_init);
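
In the new p2m each rbtree node covers a whole [pfn, pfn + nr_pages) range, and a lookup returns the range's mfn plus the offset into it. The same idea as a runnable user-space sketch, with a plain linked list standing in for the kernel rbtree and no locking (both simplifications are mine, not the commit's):

    #include <stdio.h>
    #include <stdlib.h>

    #define INVALID_P2M_ENTRY (~0UL)

    /* One entry covers a contiguous [pfn, pfn + nr_pages) range. */
    struct p2m_entry {
        unsigned long pfn, mfn, nr_pages;
        struct p2m_entry *next;
    };

    static struct p2m_entry *p2m; /* linear list instead of an rbtree */

    static int set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
                                         unsigned long nr_pages)
    {
        struct p2m_entry *e = malloc(sizeof(*e));
        if (!e)
            return 0;
        e->pfn = pfn;
        e->mfn = mfn;
        e->nr_pages = nr_pages;
        e->next = p2m;
        p2m = e;
        return 1;
    }

    /* Range lookup: the same containment test as __pfn_to_mfn(). */
    static unsigned long pfn_to_mfn(unsigned long pfn)
    {
        struct p2m_entry *e;

        for (e = p2m; e; e = e->next)
            if (e->pfn <= pfn && e->pfn + e->nr_pages > pfn)
                return e->mfn + (pfn - e->pfn);
        return INVALID_P2M_ENTRY;
    }

    int main(void)
    {
        /* Map 16 guest frames at pfn 0x80000 to machine frames at 0x42000. */
        set_phys_to_machine_multi(0x80000, 0x42000, 16);
        printf("pfn 0x80005 -> mfn 0x%lx\n", pfn_to_mfn(0x80005)); /* 0x42005 */
        printf("pfn 0x80010 -> mfn 0x%lx\n", pfn_to_mfn(0x80010)); /* invalid */
        return 0;
    }
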
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c04454876bcb..04ffafb6fbe9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -211,6 +211,7 @@ config XEN_DOM0
 config XEN
         bool "Xen guest support on ARM64 (EXPERIMENTAL)"
         depends on ARM64 && OF
+        select SWIOTLB_XEN
         help
           Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
 
 #include <asm-generic/dma-coherent.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE  (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
         if (unlikely(!dev) || !dev->archdata.dma_ops)
                 return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
         return dev->archdata.dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+        if (xen_initial_domain())
+                return xen_dma_ops;
+        else
+                return __generic_dma_ops(dev);
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1d12f89140ba..c163287b9871 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 
+#include <xen/xen.h>
+
 /*
  * Generic IO read/write.  These perform native-endian accesses.
  */
@@ -263,5 +266,11 @@ extern int devmem_is_allowed(unsigned long pfn);
  */
 #define xlate_dev_kmem_ptr(p)   p
 
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+                                      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)                               \
+        (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&                         \
+         (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..2820f1a6eebe
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be240404ba96..74a8d87e542b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y := xen-arm.o hypercall.o
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..96e42f97fa1f
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
+#define _ASM_IA64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        void *vstart = (void*)__get_free_pages(flags, get_order(size));
+        *dma_handle = virt_to_phys(vstart);
+        return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..7f02fe4e2c7b
--- /dev/null
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        void *vstart = (void*)__get_free_pages(flags, get_order(size));
+        *dma_handle = virt_to_phys(vstart);
+        return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c9631e73a090..b3f36369e667 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2328,12 +2328,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
         return success;
 }
 
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
-                                 unsigned int address_bits)
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+                                 unsigned int address_bits,
+                                 dma_addr_t *dma_handle)
 {
         unsigned long *in_frames = discontig_frames, out_frame;
         unsigned long flags;
         int success;
+        unsigned long vstart = (unsigned long)phys_to_virt(pstart);
 
         /*
          * Currently an auto-translated guest will not perform I/O, nor will
@@ -2368,15 +2370,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
 
         spin_unlock_irqrestore(&xen_reservation_lock, flags);
 
+        *dma_handle = virt_to_machine(vstart).maddr;
         return success ? 0 : -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
 
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 {
         unsigned long *out_frames = discontig_frames, in_frame;
         unsigned long flags;
         int success;
+        unsigned long vstart;
 
         if (xen_feature(XENFEAT_auto_translated_physmap))
                 return;
@@ -2384,6 +2388,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
         if (unlikely(order > MAX_CONTIG_ORDER))
                 return;
 
+        vstart = (unsigned long)phys_to_virt(pstart);
         memset((void *) vstart, 0, PAGE_SIZE << order);
 
         spin_lock_irqsave(&xen_reservation_lock, flags);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index a61c7d5811be..2ae8699e8767 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -799,10 +799,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
         unsigned topidx, mididx, idx;
 
-        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+        /* don't track P2M changes in autotranslate guests */
+        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
                 return true;
-        }
+
         if (unlikely(pfn >= MAX_P2M_PFN)) {
                 BUG_ON(mfn != INVALID_P2M_ENTRY);
                 return true;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 23eae5cb69c2..c794ea182140 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
         def_bool y
-        depends on PCI && X86
         select SWIOTLB
 
 config XEN_TMEM
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298893b1..62ccf5424ba8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
 #include <xen/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                         &map_ops[i].status, __func__);
 
-        if (xen_feature(XENFEAT_auto_translated_physmap))
+        /* this is basically a nop on x86 */
+        if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                for (i = 0; i < count; i++) {
+                        if (map_ops[i].status)
+                                continue;
+                        set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+                                        map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+                }
                 return ret;
+        }
 
         if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                 arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
         if (ret)
                 return ret;
 
-        if (xen_feature(XENFEAT_auto_translated_physmap))
+        /* this is basically a nop on x86 */
+        if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                for (i = 0; i < count; i++) {
+                        set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+                                        INVALID_P2M_ENTRY);
+                }
                 return ret;
+        }
 
         if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                 arch_enter_lazy_mmu_mode();
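
The point of the gnttab_map_refs()/gnttab_unmap_refs() change: once a foreign frame is mapped into a guest page, the p2m has to answer that frame's machine address for the page's pfn, otherwise swiotlb-xen's phys-to-bus translation (identity by default in dom0) hands the device a wrong address. A toy illustration with a one-slot p2m; the addresses and the 4 KiB page size are made up for the example:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define INVALID_P2M_ENTRY (~0UL)

    /* A one-slot p2m standing in for set_phys_to_machine(). */
    static unsigned long p2m_pfn = INVALID_P2M_ENTRY, p2m_mfn;

    static void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
    {
        p2m_pfn = pfn;
        p2m_mfn = mfn;
    }

    static unsigned long pfn_to_mfn(unsigned long pfn)
    {
        return pfn == p2m_pfn ? p2m_mfn : pfn; /* dom0 default: 1:1 */
    }

    /* What swiotlb-xen does to build a bus address for a driver. */
    static unsigned long phys_to_bus(unsigned long paddr)
    {
        return (pfn_to_mfn(paddr >> PAGE_SHIFT) << PAGE_SHIFT)
                | (paddr & ((1UL << PAGE_SHIFT) - 1));
    }

    int main(void)
    {
        unsigned long host_addr = 0x80000000UL;    /* page backing the grant */
        unsigned long dev_bus_addr = 0x13370000UL; /* foreign machine frame */

        /* Before the fix: p2m knows nothing, so the bus address is wrong. */
        printf("before: bus = 0x%lx\n", phys_to_bus(host_addr + 0x10));

        /* gnttab_map_refs() now records the mapping, as in the hunk above. */
        set_phys_to_machine(host_addr >> PAGE_SHIFT,
                            dev_bus_addr >> PAGE_SHIFT);
        printf("after:  bus = 0x%lx\n", phys_to_bus(host_addr + 0x10));
        return 0;
    }
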
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 44af9d8577de..a224bc74b6b9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -43,6 +43,9 @@
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
 
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
 #include <trace/events/swiotlb.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -50,6 +53,20 @@
  * API.
  */
 
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+                                             gfp_t gfp)
+{
+        unsigned long dma_mask = 0;
+
+        dma_mask = dev->coherent_dma_mask;
+        if (!dma_mask)
+                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+        return dma_mask;
+}
+#endif
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -58,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
         return phys_to_machine(XPADDR(paddr)).maddr;
 }
 
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
         return machine_to_phys(XMADDR(baddr)).paddr;
 }
 
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
 {
         return xen_phys_to_bus(virt_to_phys(address));
 }
@@ -91,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
         return 1;
 }
 
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
         unsigned long pfn = PFN_DOWN(p);
         unsigned int offset = p & ~PAGE_MASK;
@@ -128,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
         int i, rc;
         int dma_bits;
+        dma_addr_t dma_handle;
+        phys_addr_t p = virt_to_phys(buf);
 
         dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -137,9 +156,9 @@
 
         do {
                 rc = xen_create_contiguous_region(
-                        (unsigned long)buf + (i << IO_TLB_SHIFT),
+                        p + (i << IO_TLB_SHIFT),
                         get_order(slabs << IO_TLB_SHIFT),
-                        dma_bits);
+                        dma_bits, &dma_handle);
         } while (rc && dma_bits++ < max_dma_bits);
         if (rc)
                 return rc;
@@ -265,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         void *ret;
         int order = get_order(size);
         u64 dma_mask = DMA_BIT_MASK(32);
-        unsigned long vstart;
         phys_addr_t phys;
         dma_addr_t dev_addr;
 
@@ -280,8 +298,12 @@
         if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                 return ret;
 
-        vstart = __get_free_pages(flags, order);
-        ret = (void *)vstart;
+        /* On ARM this function returns an ioremap'ped virtual address for
+         * which virt_to_phys doesn't return the corresponding physical
+         * address. In fact on ARM virt_to_phys only works for kernel direct
+         * mapped RAM memory. Also see comment below.
+         */
+        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
         if (!ret)
                 return ret;
@@ -289,18 +311,21 @@
         if (hwdev && hwdev->coherent_dma_mask)
                 dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-        phys = virt_to_phys(ret);
+        /* At this point dma_handle is the physical address, next we are
+         * going to set it to the machine address.
+         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+         * to *dma_handle. */
+        phys = *dma_handle;
         dev_addr = xen_phys_to_bus(phys);
         if (((dev_addr + size - 1 <= dma_mask)) &&
             !range_straddles_page_boundary(phys, size))
                 *dma_handle = dev_addr;
         else {
-                if (xen_create_contiguous_region(vstart, order,
-                                                 fls64(dma_mask)) != 0) {
-                        free_pages(vstart, order);
+                if (xen_create_contiguous_region(phys, order,
+                                                 fls64(dma_mask), dma_handle) != 0) {
+                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                         return NULL;
                 }
-                *dma_handle = virt_to_machine(ret).maddr;
         }
         memset(ret, 0, size);
         return ret;
@@ -321,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         if (hwdev && hwdev->coherent_dma_mask)
                 dma_mask = hwdev->coherent_dma_mask;
 
-        phys = virt_to_phys(vaddr);
+        /* do not use virt_to_phys because on ARM it doesn't return you the
+         * physical address */
+        phys = xen_bus_to_phys(dev_addr);
 
         if (((dev_addr + size - 1 > dma_mask)) ||
             range_straddles_page_boundary(phys, size))
-                xen_destroy_contiguous_region((unsigned long)vaddr, order);
+                xen_destroy_contiguous_region(phys, order);
 
-        free_pages((unsigned long)vaddr, order);
+        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
@@ -354,8 +381,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
          * buffering it.
          */
         if (dma_capable(dev, dev_addr, size) &&
-            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+            !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+                /* we are not interested in the dma_addr returned by
+                 * xen_dma_map_page, only in the potential cache flushes executed
+                 * by the function. */
+                xen_dma_map_page(dev, page, offset, size, dir, attrs);
                 return dev_addr;
+        }
 
         /*
          * Oh well, have to allocate and map a bounce buffer.
@@ -366,6 +398,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         if (map == SWIOTLB_MAP_ERROR)
                 return DMA_ERROR_CODE;
 
+        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+                         map & ~PAGE_MASK, size, dir, attrs);
         dev_addr = xen_phys_to_bus(map);
 
         /*
@@ -388,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                             size_t size, enum dma_data_direction dir)
+                             size_t size, enum dma_data_direction dir,
+                             struct dma_attrs *attrs)
 {
         phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
+        xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
         /* NOTE: We use dev_addr here, not paddr! */
         if (is_xen_swiotlb_buffer(dev_addr)) {
                 swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -416,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
 {
-        xen_unmap_single(hwdev, dev_addr, size, dir);
+        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -439,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
         BUG_ON(dir == DMA_NONE);
 
+        if (target == SYNC_FOR_CPU)
+                xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
         /* NOTE: We use dev_addr here, not paddr! */
-        if (is_xen_swiotlb_buffer(dev_addr)) {
+        if (is_xen_swiotlb_buffer(dev_addr))
                 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-                return;
-        }
+
+        if (target == SYNC_FOR_DEVICE)
+                xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
 
         if (dir != DMA_FROM_DEVICE)
                 return;
@@ -506,16 +547,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                                  sg->length,
                                                  dir);
                         if (map == SWIOTLB_MAP_ERROR) {
+                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                 /* Don't panic here, we expect map_sg users
                                    to do proper error handling. */
                                 xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                            attrs);
                                 sg_dma_len(sgl) = 0;
-                                return DMA_ERROR_CODE;
+                                return 0;
                         }
                         sg->dma_address = xen_phys_to_bus(map);
-                } else
+                } else {
+                        /* we are not interested in the dma_addr returned by
+                         * xen_dma_map_page, only in the potential cache flushes executed
+                         * by the function. */
+                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+                                         paddr & ~PAGE_MASK,
+                                         sg->length,
+                                         dir,
+                                         attrs);
                         sg->dma_address = dev_addr;
+                }
                 sg_dma_len(sg) = sg->length;
         }
         return nelems;
@@ -537,7 +588,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
         BUG_ON(dir == DMA_NONE);
 
         for_each_sg(sgl, sg, nelems, i)
-                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -597,3 +648,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
         return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+        if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+                return -EIO;
+
+        *dev->dma_mask = dma_mask;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
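
The new xen_swiotlb_set_dma_mask() at the end of this file simply refuses any mask the Xen IO TLB cannot satisfy and records it otherwise. A user-space sketch of that policy; the 32-bit threshold below is an illustrative stand-in for the real xen_virt_to_bus(xen_io_tlb_end - 1) <= mask test, and the struct device is a stub:

    #include <stdio.h>

    typedef unsigned long long u64;

    struct device { u64 *dma_mask; u64 mask_storage; };

    /* Stub: pretend any mask of at least 32 bits is reachable by the IO TLB. */
    static int xen_swiotlb_dma_supported(struct device *dev, u64 dma_mask)
    {
        (void)dev;
        return dma_mask >= 0xffffffffULL;
    }

    /* Mirrors the kernel function: reject unsupported masks, else record. */
    static int xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
    {
        if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
            return -5; /* -EIO */
        *dev->dma_mask = dma_mask;
        return 0;
    }

    int main(void)
    {
        struct device d = { &d.mask_storage, 0 };
        printf("64-bit mask: %d\n", xen_swiotlb_set_dma_mask(&d, ~0ULL));
        printf("24-bit mask: %d\n", xen_swiotlb_set_dma_mask(&d, 0xffffffULL));
        return 0;
    }
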
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 4d81cccf1097..8b2eb93ae8ba 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -56,4 +56,6 @@ xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
+extern int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
 #endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d6fe062cad6b..fb2ea8f26552 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -19,10 +19,11 @@ void xen_arch_resume(void);
 int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;
-int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
-                                 unsigned int address_bits);
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+                                 unsigned int address_bits,
+                                 dma_addr_t *dma_handle);
 
-void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 
 struct vm_area_struct;
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 55587060e893..e4399fa65ad6 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -505,6 +505,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 
 not_found:
         spin_unlock_irqrestore(&io_tlb_lock, flags);
+        dev_warn(hwdev, "swiotlb buffer is full\n");
         return SWIOTLB_MAP_ERROR;
 found:
         spin_unlock_irqrestore(&io_tlb_lock, flags);