author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 15:36:09 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-11-08 16:10:48 -0500
commit     e1d8f62ad49a6a7068aa1bdc30252911d71c4dc4
tree       e7ad9bf58ba9b58bf48ff59283ba0c27b03969d1 /arch/arm/include
parent     bad97817dece759dd6c0b24f862b7d0ed588edda
parent     15177608c703e7b4aa29aa7c93b31001effe504c
Merge remote-tracking branch 'stefano/swiotlb-xen-9.1' into stable/for-linus-3.13
* stefano/swiotlb-xen-9.1:
swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
grant-table: call set_phys_to_machine after mapping grant refs
arm,arm64: do not always merge biovec if we are running on Xen
swiotlb: print a warning when the swiotlb is full
swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
swiotlb-xen: use xen_alloc/free_coherent_pages
xen: introduce xen_alloc/free_coherent_pages
arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
swiotlb-xen: introduce xen_swiotlb_set_dma_mask
xen/arm,arm64: enable SWIOTLB_XEN
xen: make xen_create_contiguous_region return the dma address
xen/x86: allow __set_phys_to_machine for autotranslate guests
arm/xen,arm64/xen: introduce p2m
arm64: define DMA_ERROR_CODE
arm: make SWIOTLB available
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Conflicts:
arch/arm/include/asm/dma-mapping.h
drivers/xen/swiotlb-xen.c
[Conflicts arose because "arm: make SWIOTLB available" v8 was in Stefano's
branch, while I had v9 + Ack from Russell. I also fixed up white-space
issues]
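
The practical effect on ARM is that the generic DMA API now dispatches to swiotlb-xen whenever the kernel runs as the initial domain. A minimal sketch of that path follows; the function and variable names are hypothetical and only illustrate how an ordinary dma_map_page() call would end up in xen_dma_ops under dom0:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: after this merge, get_dma_ops() on ARM returns
 * xen_dma_ops for the initial domain, so this plain mapping call is
 * handled by swiotlb-xen instead of arm_dma_ops.
 */
static dma_addr_t example_map_one_page(struct device *dev, struct page *page)
{
        dma_addr_t handle;

        handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return DMA_ERROR_CODE;
        return handle;
}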
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h        13
-rw-r--r--  arch/arm/include/asm/io.h                  8
-rw-r--r--  arch/arm/include/asm/xen/hypervisor.h      2
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h  50
-rw-r--r--  arch/arm/include/asm/xen/page.h           50
5 files changed, 115 insertions, 8 deletions
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 1ad2c171054b..8acfef48124a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
 #define DMA_ERROR_CODE (~0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
         if (dev && dev->archdata.dma_ops)
                 return dev->archdata.dma_ops;
         return &arm_dma_ops;
 }
 
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+        if (xen_initial_domain())
+                return xen_dma_ops;
+        else
+                return __generic_dma_ops(dev);
+}
+
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
         BUG_ON(!dev);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..c45effc18312 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define BIOVEC_MERGEABLE(vec1, vec2) \
         ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+                                      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+        (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+         (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #ifdef CONFIG_MMU
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9eb..1317ee40f4df 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
         return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..1109017499e5
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+                dma_addr_t *dma_handle, gfp_t flags,
+                struct dma_attrs *attrs)
+{
+        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+                void *cpu_addr, dma_addr_t dma_handle,
+                struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+             unsigned long offset, size_t size, enum dma_data_direction dir,
+             struct dma_attrs *attrs)
+{
+        __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+{
+        if (__generic_dma_ops(hwdev)->unmap_page)
+                __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+                __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (__generic_dma_ops(hwdev)->sync_single_for_device)
+                __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
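
For context, a sketch of how a swiotlb-xen caller might use these new wrappers; the helper names below are hypothetical stand-ins for the real bookkeeping in drivers/xen/swiotlb-xen.c:

/*
 * Hypothetical caller: obtain and release one coherent page through the
 * native dma_map_ops, which is what the xen_*_coherent_pages wrappers
 * above delegate to on ARM.
 */
static void *example_get_coherent(struct device *hwdev, dma_addr_t *dma_handle)
{
        return xen_alloc_coherent_pages(hwdev, PAGE_SIZE, dma_handle,
                                        GFP_KERNEL | __GFP_ZERO, NULL);
}

static void example_put_coherent(struct device *hwdev, void *vaddr,
                                 dma_addr_t dma_handle)
{
        xen_free_coherent_pages(hwdev, PAGE_SIZE, vaddr, dma_handle, NULL);
}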
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b158..71bb779f2761 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)                 (pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)                 (mfn)
 #define mfn_to_virt(m)                  (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn         pte_pfn
@@ -32,6 +32,44 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY      (~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+        unsigned long mfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                mfn = __pfn_to_mfn(pfn);
+                if (mfn != INVALID_P2M_ENTRY)
+                        return mfn;
+        }
+
+        if (xen_initial_domain())
+                return pfn;
+        else
+                return INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+        unsigned long pfn;
+
+        if (phys_to_mach.rb_node != NULL) {
+                pfn = __mfn_to_pfn(mfn);
+                if (pfn != INVALID_P2M_ENTRY)
+                        return pfn;
+        }
+
+        if (xen_initial_domain())
+                return mfn;
+        else
+                return INVALID_P2M_ENTRY;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
         unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +114,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
         return 0;
 }
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-        BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-        return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+                unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
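
To make the new p2m lookup semantics concrete: when the rbtree-backed phys_to_mach tree has no entry for a pfn, pfn_to_mfn() falls back to an identity mapping for the initial domain and returns INVALID_P2M_ENTRY for other guests. A small illustrative check, not part of the patch:

/*
 * Illustrative only: expected fallback behaviour of the new ARM
 * pfn_to_mfn() helper when the p2m tree holds no entry for this pfn.
 */
static bool example_p2m_fallback(unsigned long pfn)
{
        unsigned long mfn = pfn_to_mfn(pfn);

        if (xen_initial_domain())
                return mfn == pfn;               /* identity in dom0 */
        return mfn == INVALID_P2M_ENTRY;         /* no translation in a domU */
}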