From 25b719d7b45947a79d298414cdfb5ec8fadf0ec8 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Tue, 15 Oct 2013 15:49:03 +0000
Subject: arm64: define DMA_ERROR_CODE

Signed-off-by: Stefano Stabellini
Acked-by: Catalin Marinas
CC: will.deacon@arm.com

Changes in v8:
- cast to dma_addr_t before returning.
---
 arch/arm64/include/asm/dma-mapping.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..59206b178494 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -25,6 +25,7 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
--
cgit v1.2.2

From 4a19138c6505e224d9f4cc2fe9ada9188d7100ea Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Thu, 17 Oct 2013 16:22:27 +0000
Subject: arm/xen,arm64/xen: introduce p2m

Introduce physical to machine and machine to physical tracking
mechanisms based on rbtrees for arm/xen and arm64/xen.

We need it because all guests on ARM are autotranslated guests,
therefore a physical address is potentially different from a machine
address. When programming a device to do DMA, we need to be extra
careful to use machine addresses rather than physical addresses to
program the device. Therefore we need to know the physical to machine
mappings.

For the moment we assume that dom0 starts with a 1:1 physical to
machine mapping, in other words physical addresses correspond to
machine addresses. However when mapping a foreign grant reference,
obviously the 1:1 model doesn't work anymore. So at the very least we
need to be able to track grant mappings.

We need locking to protect accesses to the two trees.

Signed-off-by: Stefano Stabellini

Changes in v8:
- move pfn_to_mfn and mfn_to_pfn to page.h as static inline functions;
- no need to walk the tree if phys_to_mach.rb_node is NULL;
- correctly handle multipage p2m entries;
- substitute the spin_lock with an rwlock.
---
 arch/arm64/xen/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be240404ba96..cd866b0c619b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o)
 obj-y		:= xen-arm.o hypercall.o
--
cgit v1.2.2

From 83862ccfc0a03212fde43b4ac29c28381828768b Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Thu, 10 Oct 2013 13:40:44 +0000
Subject: xen/arm,arm64: enable SWIOTLB_XEN

Xen on arm and arm64 needs SWIOTLB_XEN: when running on Xen we need to
program the hardware with mfns rather than pfns for dma addresses.
Remove the SWIOTLB_XEN dependency on X86 and PCI and make XEN select
SWIOTLB_XEN on arm and arm64.

At the moment we always rely on swiotlb-xen, but when Xen starts
supporting hardware IOMMUs we'll be able to avoid it conditionally on
the presence of an IOMMU on the platform.

Implement xen_create_contiguous_region on arm and arm64: for the moment
we assume that dom0 has been mapped 1:1 (physical addresses ==
machine addresses), therefore we don't need to call XENMEM_exchange.
Simply return the physical address as dma address.
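Under that 1:1 assumption the hook collapses to almost nothing. The mm.c
side of the change is outside the arch/arm64-limited view shown here, so
the following is only a hedged sketch of the idea described above, not
the exact hunk:

	/* Sketch: dom0 is assumed to be 1:1 mapped, so the buffer is
	 * already machine-contiguous and the physical address can be
	 * handed back directly as the dma (machine) address.
	 * Illustrative only. */
	int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
					 unsigned int address_bits,
					 dma_addr_t *dma_handle)
	{
		if (!xen_initial_domain())
			return -EINVAL;

		*dma_handle = pstart;
		return 0;
	}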
Initialize the xen-swiotlb from xen_early_init (before the native
dma_ops are initialized), and set xen_dma_ops to &xen_swiotlb_dma_ops.

Signed-off-by: Stefano Stabellini

Changes in v8:
- assume dom0 is mapped 1:1, no need to call XENMEM_exchange.

Changes in v7:
- call __set_phys_to_machine_multi from xen_create_contiguous_region and
  xen_destroy_contiguous_region to update the P2M;
- don't call XENMEM_unpin, it has been removed;
- call XENMEM_exchange instead of XENMEM_exchange_and_pin;
- set nr_exchanged to 0 before calling the hypercall.

Changes in v6:
- introduce and export xen_dma_ops;
- call xen_mm_init as an arch_initcall.

Changes in v4:
- remove redefinition of DMA_ERROR_CODE;
- update the code to use XENMEM_exchange_and_pin and XENMEM_unpin;
- add a note about hardware IOMMU in the commit message.

Changes in v3:
- code style changes;
- warn on XENMEM_put_dma_buf failures.
---
 arch/arm64/Kconfig      | 1 +
 arch/arm64/xen/Makefile | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c04454876bcb..04ffafb6fbe9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -211,6 +211,7 @@ config XEN_DOM0
 
 config XEN
 	bool "Xen guest support on ARM64 (EXPERIMENTAL)"
 	depends on ARM64 && OF
+	select SWIOTLB_XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.

diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index cd866b0c619b..74a8d87e542b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o)
+xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
--
cgit v1.2.2

From 58c8b26909f287c72a7a39d26f800ffe61e01bf3 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Fri, 18 Oct 2013 16:01:32 +0000
Subject: arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain

We can't simply override arm_dma_ops with xen_dma_ops because devices
are allowed to have their own dma_ops and they take precedence over
arm_dma_ops. When running on Xen as the initial domain, we always want
xen_dma_ops to be the one in use.

We introduce __generic_dma_ops to allow xen_dma_ops functions to call
back to the native implementation.

Signed-off-by: Stefano Stabellini
Acked-by: Catalin Marinas
CC: will.deacon@arm.com

Changes in v7:
- return xen_dma_ops only if we are the initial domain;
- rename __get_dma_ops to __generic_dma_ops.
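The hunk below implements exactly this dispatch. For context, the reason
a global arm_dma_ops override is not enough is that the common DMA API
always goes through get_dma_ops(), which prefers a per-device dma_ops
pointer. A rough, illustrative-only sketch of that call path (the helper
name here is made up for the example):

	/* Hypothetical illustration of the generic dispatch path: a
	 * per-device dma_ops pointer would normally win, which is why
	 * get_dma_ops() itself must short-circuit to xen_dma_ops when
	 * running as the initial domain. */
	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, page, offset, size, dir, NULL);
	}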
---
 arch/arm64/include/asm/dma-mapping.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 59206b178494..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,12 +23,15 @@
 
 #include <asm-generic/dma-coherent.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (unlikely(!dev) || !dev->archdata.dma_ops)
 		return dma_ops;
@@ -36,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dev->archdata.dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (xen_initial_domain())
+		return xen_dma_ops;
+	else
+		return __generic_dma_ops(dev);
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
--
cgit v1.2.2

From d6fe76c58c358498b91d21f0ca8054f6aa6e672d Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Wed, 9 Oct 2013 17:18:14 +0000
Subject: xen: introduce xen_alloc/free_coherent_pages

xen_swiotlb_alloc_coherent needs to allocate a coherent buffer for cpu
and devices. On native x86 it is sufficient to call __get_free_pages in
order to get a coherent buffer, while on ARM (and potentially ARM64) we
need to call the native dma_ops->alloc implementation.

Introduce xen_alloc_coherent_pages to abstract the arch-specific buffer
allocation.

Similarly introduce xen_free_coherent_pages to free a coherent buffer:
on x86 it is simply a call to free_pages, while on ARM and ARM64 it is
arm_dma_ops.free.

Signed-off-by: Stefano Stabellini

Changes in v7:
- rename __get_dma_ops to __generic_dma_ops;
- call __generic_dma_ops(hwdev)->alloc/free on arm64 too.

Changes in v6:
- call __get_dma_ops to get the native dma_ops pointer on arm.
---
 arch/arm64/include/asm/xen/page-coherent.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 arch/arm64/include/asm/xen/page-coherent.h

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..3b4f029b6660
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
--
cgit v1.2.2

From 7100b077ab4ff5fb0ba7760ce54465f623a0a763 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Fri, 25 Oct 2013 10:39:49 +0000
Subject: xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device

Introduce xen_dma_map_page, xen_dma_unmap_page,
xen_dma_sync_single_for_cpu and xen_dma_sync_single_for_device.
They have empty implementations on x86 and ia64 but they call the
corresponding platform dma_ops function on arm and arm64.
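On x86 and ia64 those hooks compile away. As a hedged sketch only (the
x86 header is not part of the arch/arm64-limited diff below), the stubs
look roughly like this:

	/* Sketch of the x86/ia64 side: empty static inlines, so non-ARM
	 * builds pay no cost for the new hooks. Illustrative only. */
	static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		     unsigned long offset, size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs) { }

	static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs) { }

	static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		     dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

	static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		     dma_addr_t handle, size_t size, enum dma_data_direction dir) { }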
Signed-off-by: Stefano Stabellini

Changes in v9:
- make xen_dma_map_page return void, avoid page_to_phys.
---
 arch/arm64/include/asm/xen/page-coherent.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 3b4f029b6660..2820f1a6eebe 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -19,4 +19,29 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
 }
 
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
--
cgit v1.2.2

From 3d1975b57097820c131c692d2e0d075641370369 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Fri, 25 Oct 2013 10:33:26 +0000
Subject: arm,arm64: do not always merge biovec if we are running on Xen

This is similar to what is done on X86: biovecs are prevented from
merging, otherwise every dma request would be forced to bounce through
the swiotlb buffer.

Signed-off-by: Stefano Stabellini
Acked-by: Catalin Marinas

Changes in v7:
- remove the extra autotranslate check in biomerge.c.
---
 arch/arm64/include/asm/io.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1d12f89140ba..c163287b9871 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <asm/pgtable.h>
 
+#include <xen/xen.h>
+
 /*
  * Generic IO read/write. These perform native-endian accesses.
 */
@@ -263,5 +266,11 @@ extern int devmem_is_allowed(unsigned long pfn);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_IO_H */
--
cgit v1.2.2

From ffc555be09bd2603abdbc6f8b64c6a7477facc48 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Wed, 6 Nov 2013 12:38:28 +0000
Subject: arm,arm64/include/asm/io.h: define struct bio_vec

Signed-off-by: Stefano Stabellini
Acked-by: Olof Johansson
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/arm64/include/asm/io.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index c163287b9871..757c87a04531 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -266,6 +266,7 @@ extern int devmem_is_allowed(unsigned long pfn);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+struct bio_vec;
 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 				      const struct bio_vec *vec2);
 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
--
cgit v1.2.2
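For reference, the helper that BIOVEC_PHYS_MERGEABLE calls is shared with
x86 in drivers/xen/biomerge.c: it only allows two biovecs to merge when
the machine frames backing them are contiguous. A hedged sketch of the
idea (not necessarily the exact file contents):

	/* Sketch: allow a merge only if the backing machine frames are
	 * adjacent, otherwise the merged request would have to bounce
	 * through the swiotlb buffer. Illustrative only. */
	bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
				       const struct bio_vec *vec2)
	{
		unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
		unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

		return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
			((mfn1 == mfn2) || ((mfn1 + 1) == mfn2));
	}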