author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-11-08 15:36:09 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-11-08 16:10:48 -0500
commit	e1d8f62ad49a6a7068aa1bdc30252911d71c4dc4 (patch)
tree	e7ad9bf58ba9b58bf48ff59283ba0c27b03969d1 /arch/arm64/include
parent	bad97817dece759dd6c0b24f862b7d0ed588edda (diff)
parent	15177608c703e7b4aa29aa7c93b31001effe504c (diff)
Merge remote-tracking branch 'stefano/swiotlb-xen-9.1' into stable/for-linus-3.13

* stefano/swiotlb-xen-9.1:
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  swiotlb-xen: introduce xen_swiotlb_set_dma_mask
  xen/arm,arm64: enable SWIOTLB_XEN
  xen: make xen_create_contiguous_region return the dma address
  xen/x86: allow __set_phys_to_machine for autotranslate guests
  arm/xen,arm64/xen: introduce p2m
  arm64: define DMA_ERROR_CODE
  arm: make SWIOTLB available

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Conflicts:
	arch/arm/include/asm/dma-mapping.h
	drivers/xen/swiotlb-xen.c
[Conflicts arose because "arm: make SWIOTLB available" v8 was in Stefano's branch, while I had v9 + Ack from Russell. I also fixed up white-space issues]
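From a driver's point of view nothing in this series changes the DMA API itself; the arm64 hunks below only affect which struct dma_map_ops get_dma_ops() hands back when the kernel runs as the Xen initial domain. A minimal, hypothetical caller (device and function names are illustrative, not from the merge) still looks like this:

#include <linux/dma-mapping.h>

/* Hypothetical driver code, for illustration only. */
static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	/* On arm64 a failed mapping now reports DMA_ERROR_CODE (see below). */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the device with 'addr', then unmap when done ... */
	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}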
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/dma-mapping.h	14
-rw-r--r--	arch/arm64/include/asm/io.h	9
-rw-r--r--	arch/arm64/include/asm/xen/page-coherent.h	47
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
 
 #include <asm-generic/dma-coherent.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (unlikely(!dev) || !dev->archdata.dma_ops)
 		return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dev->archdata.dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
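A short usage sketch of the dispatch added above (assumed caller code, not part of the diff): any DMA API entry point, for example dma_alloc_coherent(), now resolves its ops through get_dma_ops(), so in dom0 the request is serviced by xen_dma_ops (swiotlb-xen, which can hand back a buffer usable by real hardware), while everywhere else it falls through to __generic_dma_ops() exactly as before.

#include <linux/dma-mapping.h>

/* Illustrative only: 'dev' is a hypothetical device. */
static void *example_alloc_ring(struct device *dev, dma_addr_t *bus_addr)
{
	/* In dom0 this ends up in xen_dma_ops; otherwise in the native arm64 ops. */
	return dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);
}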
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1d12f89140ba..c163287b9871 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 
+#include <xen/xen.h>
+
 /*
  * Generic IO read/write. These perform native-endian accesses.
  */
@@ -263,5 +266,11 @@ extern int devmem_is_allowed(unsigned long pfn);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_IO_H */
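The rationale behind the hook above (per the merged commit "arm,arm64: do not always merge biovec if we are running on Xen") is that two segments which look contiguous in the guest's pseudo-physical address space are not necessarily contiguous in machine memory. A sketch of what the macro effectively evaluates, written out as a function for clarity; illustrative only, not part of the patch:

#include <linux/bio.h>
#include <xen/xen.h>

static bool example_can_merge(const struct bio_vec *v1, const struct bio_vec *v2)
{
	phys_addr_t end1   = page_to_phys(v1->bv_page) + v1->bv_offset + v1->bv_len;
	phys_addr_t start2 = page_to_phys(v2->bv_page) + v2->bv_offset;

	if (end1 != start2)	/* not even pseudo-physically adjacent */
		return false;
	if (xen_domain())	/* let the Xen backend veto merges across MFN gaps */
		return xen_biovec_phys_mergeable(v1, v2);
	return true;
}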
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..2820f1a6eebe
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
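These wrappers exist so that a Xen DMA backend can reuse the native arm64 cache maintenance while doing the pseudo-physical-to-machine translation itself, per the merged commit "swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device". A rough usage sketch under that assumption; the function names and the identity address translation are placeholders, not swiotlb-xen code:

#include <asm/xen/page-coherent.h>

static dma_addr_t example_backend_map(struct device *hwdev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	/* A real backend would translate pseudo-physical to machine/bus here. */
	dma_addr_t dev_addr = page_to_phys(page) + offset;

	/* Delegate CPU cache maintenance for the mapping to the native ops. */
	xen_dma_map_page(hwdev, page, offset, size, dir, attrs);
	return dev_addr;
}

static void example_backend_sync_for_cpu(struct device *hwdev, dma_addr_t handle,
					 size_t size, enum dma_data_direction dir)
{
	/* Hand ownership of the buffer back to the CPU before the kernel reads it. */
	xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}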