diff options
Diffstat (limited to 'arch/arm/include/asm/xen/page-coherent.h')
-rw-r--r-- | arch/arm/include/asm/xen/page-coherent.h | 66 |
1 file changed, 57 insertions, 9 deletions
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index e8275ea88e88..efd562412850 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h | |||
@@ -5,6 +5,18 @@ | |||
5 | #include <linux/dma-attrs.h> | 5 | #include <linux/dma-attrs.h> |
6 | #include <linux/dma-mapping.h> | 6 | #include <linux/dma-mapping.h> |
7 | 7 | ||
8 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | ||
9 | dma_addr_t dev_addr, unsigned long offset, size_t size, | ||
10 | enum dma_data_direction dir, struct dma_attrs *attrs); | ||
11 | void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
12 | size_t size, enum dma_data_direction dir, | ||
13 | struct dma_attrs *attrs); | ||
14 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
15 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | ||
16 | |||
17 | void __xen_dma_sync_single_for_device(struct device *hwdev, | ||
18 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | ||
19 | |||
8 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 20 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, |
9 | dma_addr_t *dma_handle, gfp_t flags, | 21 | dma_addr_t *dma_handle, gfp_t flags, |
10 | struct dma_attrs *attrs) | 22 | struct dma_attrs *attrs) |
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | |||
20 | } | 32 | } |
21 | 33 | ||
22 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | 34 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, |
23 | unsigned long offset, size_t size, enum dma_data_direction dir, | 35 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
24 | struct dma_attrs *attrs) | 36 | enum dma_data_direction dir, struct dma_attrs *attrs) |
25 | { | 37 | { |
26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | 38 | bool local = PFN_DOWN(dev_addr) == page_to_pfn(page); |
39 | /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise | ||
40 | * is a foreign page grant-mapped in dom0. If the page is local we | ||
41 | * can safely call the native dma_ops function, otherwise we call | ||
42 | * the xen specific function. */ | ||
43 | if (local) | ||
44 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
45 | else | ||
46 | __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); | ||
27 | } | 47 | } |
28 | 48 | ||
29 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 49 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
30 | size_t size, enum dma_data_direction dir, | 50 | size_t size, enum dma_data_direction dir, |
31 | struct dma_attrs *attrs); | 51 | struct dma_attrs *attrs) |
52 | { | ||
53 | unsigned long pfn = PFN_DOWN(handle); | ||
54 | /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will | ||
55 | * always return false. If the page is local we can safely call the | ||
56 | * native dma_ops function, otherwise we call the xen specific | ||
57 | * function. */ | ||
58 | if (pfn_valid(pfn)) { | ||
59 | if (__generic_dma_ops(hwdev)->unmap_page) | ||
60 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
61 | } else | ||
62 | __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); | ||
63 | } | ||
32 | 64 | ||
33 | void xen_dma_sync_single_for_cpu(struct device *hwdev, | 65 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, |
34 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 66 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
67 | { | ||
68 | unsigned long pfn = PFN_DOWN(handle); | ||
69 | if (pfn_valid(pfn)) { | ||
70 | if (__generic_dma_ops(hwdev)->sync_single_for_cpu) | ||
71 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
72 | } else | ||
73 | __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); | ||
74 | } | ||
35 | 75 | ||
36 | void xen_dma_sync_single_for_device(struct device *hwdev, | 76 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, |
37 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 77 | dma_addr_t handle, size_t size, enum dma_data_direction dir) |
78 | { | ||
79 | unsigned long pfn = PFN_DOWN(handle); | ||
80 | if (pfn_valid(pfn)) { | ||
81 | if (__generic_dma_ops(hwdev)->sync_single_for_device) | ||
82 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
83 | } else | ||
84 | __xen_dma_sync_single_for_device(hwdev, handle, size, dir); | ||
85 | } | ||
38 | 86 | ||
39 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | 87 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ |