Diffstat (limited to 'arch/arm/xen')
-rw-r--r--   arch/arm/xen/Makefile        2
-rw-r--r--   arch/arm/xen/enlighten.c     5
-rw-r--r--   arch/arm/xen/mm.c          121
-rw-r--r--   arch/arm/xen/mm32.c        202
4 files changed, 122 insertions, 208 deletions
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 1f85bfe6b470..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0e15f011f9c8..c7ca936ebd99 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
 
 	xen_setup_features();
 
-	if (!xen_feature(XENFEAT_grant_map_identity)) {
-		pr_warn("Please upgrade your Xen.\n"
-			"If your platform has any non-coherent DMA devices, they won't work properly.\n");
-	}
-
 	if (xen_feature(XENFEAT_dom0))
 		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 	else
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..351b24a979d4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
@@ -8,6 +12,7 @@
 #include <linux/swiotlb.h>
 
 #include <xen/xen.h>
+#include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/swiotlb-xen.h>
 
@@ -16,6 +21,114 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+enum dma_cache_op {
+	DMA_UNMAP,
+	DMA_MAP,
+};
+static bool hypercall_cflush = false;
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+{
+	struct gnttab_cache_flush cflush;
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
+	do {
+		size_t len = left;
+
+		/* buffers in highmem or foreign pages cannot cross page
+		 * boundaries */
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		cflush.op = 0;
+		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+		cflush.offset = offset;
+		cflush.length = len;
+
+		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
+			cflush.op = GNTTAB_CACHE_INVAL;
+		if (op == DMA_MAP) {
+			if (dir == DMA_FROM_DEVICE)
+				cflush.op = GNTTAB_CACHE_INVAL;
+			else
+				cflush.op = GNTTAB_CACHE_CLEAN;
+		}
+		if (cflush.op)
+			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+}
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+bool xen_arch_need_swiotlb(struct device *dev,
+			   unsigned long pfn,
+			   unsigned long mfn)
+{
+	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
 				 dma_addr_t *dma_handle)
@@ -56,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 
 int __init xen_mm_init(void)
 {
+	struct gnttab_cache_flush cflush;
 	if (!xen_initial_domain())
 		return 0;
 	xen_swiotlb_init(1, false);
 	xen_dma_ops = &xen_swiotlb_dma_ops;
+
+	cflush.op = 0;
+	cflush.a.dev_bus_addr = 0;
+	cflush.offset = 0;
+	cflush.length = 0;
+	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
+		hypercall_cflush = true;
 	return 0;
 }
 arch_initcall(xen_mm_init);
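
[Note: the per-page loop added to dma_cache_maint() above issues one GNTTABOP_cache_flush operation per chunk, clamping each chunk so it never crosses a page boundary. Below is a minimal user-space sketch of just that page-walk arithmetic; the handle and size values are made-up examples, not taken from the patch.]

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Hypothetical bus address and buffer length, chosen so the
	 * buffer starts mid-page and spans three pages. */
	unsigned long long handle = 0x80001800ULL;
	unsigned long offset = handle & (PAGE_SIZE - 1);
	unsigned long pfn = handle >> PAGE_SHIFT;
	size_t left = 0x2400;

	do {
		size_t len = left;

		/* Same clamp as in the kernel code: a single flush
		 * operation must not cross a page boundary. */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* In the kernel this is where a gnttab_cache_flush op
		 * would be filled in and handed to the hypervisor. */
		printf("flush pfn %#lx offset %#lx len %#zx\n", pfn, offset, len);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);

	return 0;
}

[For this example the loop prints three chunks of 0x800, 0x1000 and 0xc00 bytes, i.e. one cache-flush operation per page touched by the buffer.]
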
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index 3b99860fd7ae..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,202 +0,0 @@
-#include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <xen/features.h>
-
-static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
-static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
-
-static int alloc_xen_mm32_scratch_page(int cpu)
-{
-	struct page *page;
-	unsigned long virt;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
-		return 0;
-
-	page = alloc_page(GFP_KERNEL);
-	if (page == NULL) {
-		pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
-		return -ENOMEM;
-	}
-
-	virt = (unsigned long)__va(page_to_phys(page));
-	pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-	ptep = pte_offset_kernel(pmdp, virt);
-
-	per_cpu(xen_mm32_scratch_virt, cpu) = virt;
-	per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
-
-	return 0;
-}
-
-static int xen_mm32_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-	switch (action) {
-	case CPU_UP_PREPARE:
-		if (alloc_xen_mm32_scratch_page(cpu))
-			return NOTIFY_BAD;
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block xen_mm32_cpu_notifier = {
-	.notifier_call	= xen_mm32_cpu_notify,
-};
-
-static void* xen_mm32_remap_page(dma_addr_t handle)
-{
-	unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
-	pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
-
-	*ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
-	local_flush_tlb_kernel_page(virt);
-
-	return (void*)virt;
-}
-
-static void xen_mm32_unmap(void *vaddr)
-{
-	put_cpu_var(xen_mm32_scratch_virt);
-}
-
-
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-	size_t size, enum dma_data_direction dir,
-	void (*op)(const void *, size_t, int))
-{
-	unsigned long pfn;
-	size_t left = size;
-
-	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
-	do {
-		size_t len = left;
-		void *vaddr;
-
-		if (!pfn_valid(pfn))
-		{
-			/* Cannot map the page, we don't know its physical address.
-			 * Return and hope for the best */
-			if (!xen_feature(XENFEAT_grant_map_identity))
-				return;
-			vaddr = xen_mm32_remap_page(handle) + offset;
-			op(vaddr, len, dir);
-			xen_mm32_unmap(vaddr - offset);
-		} else {
-			struct page *page = pfn_to_page(pfn);
-
-			if (PageHighMem(page)) {
-				if (len + offset > PAGE_SIZE)
-					len = PAGE_SIZE - offset;
-
-				if (cache_is_vipt_nonaliasing()) {
-					vaddr = kmap_atomic(page);
-					op(vaddr + offset, len, dir);
-					kunmap_atomic(vaddr);
-				} else {
-					vaddr = kmap_high_get(page);
-					if (vaddr) {
-						op(vaddr + offset, len, dir);
-						kunmap_high(page);
-					}
-				}
-			} else {
-				vaddr = page_address(page) + offset;
-				op(vaddr, len, dir);
-			}
-		}
-
-		offset = 0;
-		pfn++;
-		left -= len;
-	} while (left);
-}
-
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	/* Cannot use __dma_page_dev_to_cpu because we don't have a
-	 * struct page for handle */
-
-	if (dir != DMA_TO_DEVICE)
-		outer_inv_range(handle, handle + size);
-
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
-}
-
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-
-	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
-
-	if (dir == DMA_FROM_DEVICE) {
-		outer_inv_range(handle, handle + size);
-	} else {
-		outer_clean_range(handle, handle + size);
-	}
-}
-
-void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
-
-{
-	if (!__generic_dma_ops(hwdev)->unmap_page)
-		return;
-	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
-int __init xen_mm32_init(void)
-{
-	int cpu;
-
-	if (!xen_initial_domain())
-		return 0;
-
-	register_cpu_notifier(&xen_mm32_cpu_notifier);
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (alloc_xen_mm32_scratch_page(cpu)) {
-			put_online_cpus();
-			unregister_cpu_notifier(&xen_mm32_cpu_notifier);
-			return -ENOMEM;
-		}
-	}
-	put_online_cpus();
-
-	return 0;
-}
-arch_initcall(xen_mm32_init);