about summary refs log tree commit diff stats
path: root/arch/arm/xen
diff options
context:
space:
mode:
authorStefano Stabellini <stefano.stabellini@eu.citrix.com>2014-11-21 06:06:39 -0500
committerDavid Vrabel <david.vrabel@citrix.com>2014-12-04 07:41:53 -0500
commit5121872afe0e6470bc6b41637b258fa6e314be4f (patch)
tree184de018a4ef429eec9ffbd83e7b3e9379c7374f /arch/arm/xen
parent3567258d281b5b515d5165ed23851d9f84087e7d (diff)
xen/arm/arm64: merge xen/mm32.c into xen/mm.c
Merge xen/mm32.c into xen/mm.c. As a consequence the code gets compiled on arm64 too.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--arch/arm/xen/Makefile2
-rw-r--r--arch/arm/xen/mm.c84
-rw-r--r--arch/arm/xen/mm32.c94
3 files changed, 85 insertions, 95 deletions
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 1f85bfe6b470..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..ab700e1e5922 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
1#include <linux/cpu.h>
2#include <linux/dma-mapping.h>
1 3 #include <linux/bootmem.h>
2 4 #include <linux/gfp.h>
5 #include <linux/highmem.h>
3 6 #include <linux/export.h>
7 #include <linux/of_address.h>
4 8 #include <linux/slab.h>
5 9 #include <linux/types.h>
6 10 #include <linux/dma-mapping.h>
@@ -16,6 +20,86 @@
16 20 #include <asm/xen/hypercall.h>
17 21 #include <asm/xen/interface.h>
18 22
/* Which direction of cache maintenance the SWIOTLB glue is requesting:
 * DMA_MAP before handing a buffer to the device, DMA_UNMAP after the
 * device is done (see __xen_dma_page_cpu_to_dev / __xen_dma_page_dev_to_cpu). */
23enum dma_cache_op {
24	DMA_UNMAP,
25	DMA_MAP,
26};
27
28/* functions called by SWIOTLB */
29
/*
 * Walk the region [handle + offset, handle + offset + size) page frame by
 * page frame and apply the requested maintenance op for DMA direction dir.
 * NOTE(review): the actual flush is still a TODO below, so dir and op are
 * currently unused; and since len = left consumes the whole remainder, the
 * loop body executes exactly once as written — presumably a placeholder
 * until the per-page flush is filled in.
 */
30static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
31	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
32{
33	unsigned long pfn;
34	size_t left = size;
35
	/* Page frame containing (handle + offset); keep only the intra-page offset. */
36	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
37	offset %= PAGE_SIZE;
38
39	do {
40		size_t len = left;
41
42		/* TODO: cache flush */
43
		/* Only the first page can start mid-page. */
44		offset = 0;
45		pfn++;
46		left -= len;
47	} while (left);
48}
49
/* Device-to-CPU (post-DMA) maintenance for the pages backing handle:
 * splits handle into its page-aligned base and intra-page offset. */
50static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
51	size_t size, enum dma_data_direction dir)
52{
53	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
54}
55
/* CPU-to-device (pre-DMA) maintenance for the pages backing handle:
 * mirror of __xen_dma_page_dev_to_cpu with op = DMA_MAP. */
56static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
57	size_t size, enum dma_data_direction dir)
58{
59	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
60}
61
/*
 * SWIOTLB map_page hook. No-op when the device is cache-coherent or the
 * caller passed DMA_ATTR_SKIP_CPU_SYNC; otherwise pushes CPU caches toward
 * the device before DMA starts.  (page and offset are unused here — the
 * maintenance is driven purely by dev_addr; presumably intentional, but
 * worth confirming against the highmem path.)
 */
62void __xen_dma_map_page(struct device *hwdev, struct page *page,
63	dma_addr_t dev_addr, unsigned long offset, size_t size,
64	enum dma_data_direction dir, struct dma_attrs *attrs)
65{
66	if (is_device_dma_coherent(hwdev))
67		return;
68	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
69		return;
70
71	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
72}
73
/*
 * SWIOTLB unmap_page hook: mirror of __xen_dma_map_page, run after DMA
 * completes. Same early-outs for coherent devices and SKIP_CPU_SYNC.
 */
74void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
75	size_t size, enum dma_data_direction dir,
76	struct dma_attrs *attrs)
77
78{
79	if (is_device_dma_coherent(hwdev))
80		return;
81	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
82		return;
83
84	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
85}
86
/* SWIOTLB sync_single_for_cpu hook: device-to-CPU maintenance so the CPU
 * sees data the device wrote. Skipped for coherent devices. */
87void __xen_dma_sync_single_for_cpu(struct device *hwdev,
88	dma_addr_t handle, size_t size, enum dma_data_direction dir)
89{
90	if (is_device_dma_coherent(hwdev))
91		return;
92	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
93}
94
/* SWIOTLB sync_single_for_device hook: CPU-to-device maintenance so the
 * device sees data the CPU wrote. Skipped for coherent devices. */
95void __xen_dma_sync_single_for_device(struct device *hwdev,
96	dma_addr_t handle, size_t size, enum dma_data_direction dir)
97{
98	if (is_device_dma_coherent(hwdev))
99		return;
100	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
101}
102
19 103 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
20 104 	unsigned int address_bits,
21 105 	dma_addr_t *dma_handle)
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index c86919bbea83..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,94 +0,0 @@
1#include <linux/cpu.h>
2#include <linux/dma-mapping.h>
3#include <linux/gfp.h>
4#include <linux/highmem.h>
5
6#include <xen/features.h>
/*
 * NOTE(review): former contents of arch/arm/xen/mm32.c, which this commit
 * deletes. Everything below except xen_mm32_init is a byte-for-byte
 * duplicate of the helpers that now live in mm.c above; xen_mm32_init is
 * dropped outright (both of its branches just return 0).
 */
7enum dma_cache_op {
8	DMA_UNMAP,
9	DMA_MAP,
10};
11
12/* functions called by SWIOTLB */
13
/* Per-page cache maintenance walk; the flush itself was still a TODO. */
14static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
15	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
16{
17	unsigned long pfn;
18	size_t left = size;
19
20	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
21	offset %= PAGE_SIZE;
22
23	do {
24		size_t len = left;
25
26		/* TODO: cache flush */
27
28		offset = 0;
29		pfn++;
30		left -= len;
31	} while (left);
32}
33
/* Device-to-CPU (post-DMA) maintenance for the pages backing handle. */
34static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
35	size_t size, enum dma_data_direction dir)
36{
37	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
38}
39
/* CPU-to-device (pre-DMA) maintenance for the pages backing handle. */
40static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
41	size_t size, enum dma_data_direction dir)
42{
43	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
44}
45
/* SWIOTLB map_page hook: no-op for coherent devices or SKIP_CPU_SYNC. */
46void __xen_dma_map_page(struct device *hwdev, struct page *page,
47	dma_addr_t dev_addr, unsigned long offset, size_t size,
48	enum dma_data_direction dir, struct dma_attrs *attrs)
49{
50	if (is_device_dma_coherent(hwdev))
51		return;
52	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
53		return;
54
55	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
56}
57
/* SWIOTLB unmap_page hook: mirror of __xen_dma_map_page, run post-DMA. */
58void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
59	size_t size, enum dma_data_direction dir,
60	struct dma_attrs *attrs)
61
62{
63	if (is_device_dma_coherent(hwdev))
64		return;
65	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
66		return;
67
68	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
69}
70
/* sync_single_for_cpu hook: device-to-CPU maintenance unless coherent. */
71void __xen_dma_sync_single_for_cpu(struct device *hwdev,
72	dma_addr_t handle, size_t size, enum dma_data_direction dir)
73{
74	if (is_device_dma_coherent(hwdev))
75		return;
76	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
77}
78
/* sync_single_for_device hook: CPU-to-device maintenance unless coherent. */
79void __xen_dma_sync_single_for_device(struct device *hwdev,
80	dma_addr_t handle, size_t size, enum dma_data_direction dir)
81{
82	if (is_device_dma_coherent(hwdev))
83		return;
84	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
85}
86
/* NOTE(review): init hook removed by the merge — both branches return 0,
 * so it only ever registered itself as an arch_initcall with no effect. */
87int __init xen_mm32_init(void)
88{
89	if (!xen_initial_domain())
90		return 0;
91
92	return 0;
93}
94arch_initcall(xen_mm32_init);