Diffstat (limited to 'arch/arm/xen/mm32.c')
 arch/arm/xen/mm32.c | 94 ----------------------------------------------------
 1 file changed, 0 insertions(+), 94 deletions(-)
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index c86919bbea83..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,94 +0,0 @@
-#include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <xen/features.h>
-enum dma_cache_op {
-	DMA_UNMAP,
-	DMA_MAP,
-};
-
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
-{
-	unsigned long pfn;
-	size_t left = size;
-
-	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
-	do {
-		size_t len = left;
-
-		/* TODO: cache flush */
-
-		offset = 0;
-		pfn++;
-		left -= len;
-	} while (left);
-}
-
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
-}
-
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	dma_addr_t dev_addr, unsigned long offset, size_t size,
-	enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		return;
-
-	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir,
-	struct dma_attrs *attrs)
-
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (is_device_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
-int __init xen_mm32_init(void)
-{
-	if (!xen_initial_domain())
-		return 0;
-
-	return 0;
-}
-arch_initcall(xen_mm32_init);