author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2014-11-11 09:31:56 -0500
committer  David Vrabel <david.vrabel@citrix.com>                 2014-12-04 07:41:48 -0500
commit     2f91fc331ad495a3e602f3d0e7fd6211b1d91204
tree       9b72ca8a9588b42f947afcb603a6dab55c2d0df5 /arch/arm/xen
parent     35e0be88c426e8d2d6a4ed91d80ea25d73d127aa
xen/arm: if(pfn_valid(pfn)) call native dma_ops
Remove code duplication in mm32.c by calling the native dma_ops if the
page is a local page (not a foreign page). Use a simple pfn_valid(pfn)
check to figure out whether a page is local, exploiting the fact that
dom0 is mapped 1:1: pfn_valid therefore always returns false when
called on a foreign mfn.

Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
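
The rename of xen_dma_unmap_page and the sync functions to __xen_dma_*
in the diff below makes room for thin inline wrappers that perform the
pfn_valid(pfn) dispatch. A minimal sketch of that pattern follows; the
real wrapper lives in a header outside the 'arch/arm/xen' diffstat, so
this is an illustration of the described approach (reusing the
__generic_dma_ops() accessor already visible in this file), not the
exact upstream code:

/* Sketch only: route local pages to the native dma_ops, leaving the
 * __xen_dma_* helpers in mm32.c to handle foreign pages. */
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);

	/* Dom0 is mapped 1:1, so pfn_valid() succeeds only for local
	 * (non-foreign) pages; a foreign mfn falls through to Xen's path. */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle,
							     size, dir, attrs);
	} else {
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
	}
}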
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--  arch/arm/xen/mm32.c  50
1 file changed, 11 insertions(+), 39 deletions(-)
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
index 6153d61f0907..5bb8391188c8 100644
--- a/arch/arm/xen/mm32.c
+++ b/arch/arm/xen/mm32.c
@@ -4,13 +4,15 @@
 #include <linux/highmem.h>
 
 #include <xen/features.h>
-
+enum dma_cache_op {
+	DMA_UNMAP,
+	DMA_MAP,
+};
 
 /* functions called by SWIOTLB */
 
 static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-	size_t size, enum dma_data_direction dir,
-	void (*op)(const void *, size_t, int))
+	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
 {
 	unsigned long pfn;
 	size_t left = size;
@@ -20,34 +22,8 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 
 	do {
 		size_t len = left;
-		void *vaddr;
 
-		if (!pfn_valid(pfn))
-		{
-			/* TODO: cache flush */
-		} else {
-			struct page *page = pfn_to_page(pfn);
-
-			if (PageHighMem(page)) {
-				if (len + offset > PAGE_SIZE)
-					len = PAGE_SIZE - offset;
-
-				if (cache_is_vipt_nonaliasing()) {
-					vaddr = kmap_atomic(page);
-					op(vaddr + offset, len, dir);
-					kunmap_atomic(vaddr);
-				} else {
-					vaddr = kmap_high_get(page);
-					if (vaddr) {
-						op(vaddr + offset, len, dir);
-						kunmap_high(page);
-					}
-				}
-			} else {
-				vaddr = page_address(page) + offset;
-				op(vaddr, len, dir);
-			}
-		}
+		/* TODO: cache flush */
 
 		offset = 0;
 		pfn++;
@@ -58,20 +34,16 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* Cannot use __dma_page_dev_to_cpu because we don't have a
-	 * struct page for handle */
-
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
 }
 
 static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
 }
 
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 
@@ -84,7 +56,7 @@ void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
 }
 
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
@@ -92,7 +64,7 @@ void xen_dma_sync_single_for_cpu(struct device *hwdev,
 	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
 }
 
-void xen_dma_sync_single_for_device(struct device *hwdev,
+void __xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	if (!__generic_dma_ops(hwdev)->sync_single_for_device)