author    Stefano Stabellini <stefano.stabellini@eu.citrix.com>  2014-11-11 06:14:45 -0500
committer David Vrabel <david.vrabel@citrix.com>                 2014-12-04 07:41:45 -0500
commit    e9e87eb3f9180b2b0470409b24fb39b8a1941520 (patch)
tree      bd1a17a1233be1215a19b4c7464b029ab732ea9e /arch/arm/xen
parent    009d0431c3914de64666bec0d350e54fdd59df6a (diff)
xen/arm: remove handling of XENFEAT_grant_map_identity
The feature has been removed from Xen. Also, Linux cannot use it on ARM32
without CONFIG_ARM_LPAE.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
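For reference, the mm32.c code removed below used the feature as follows: when
dma_cache_maint() hit a page with no local struct page (!pfn_valid()), it
temporarily pointed a per-CPU scratch PTE at the foreign machine page so the
cache could still be cleaned or invalidated. A condensed sketch of that removed
path, reusing the identifiers from the diff below (kernel-internal code, shown
only for illustration, not a standalone example):

        /* Remap the foreign page through the per-CPU scratch PTE. */
        static void *xen_mm32_remap_page(dma_addr_t handle)
        {
                unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
                pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);

                *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
                local_flush_tlb_kernel_page(virt);
                return (void *)virt;
        }

        /* In dma_cache_maint(), for pages without a struct page: */
        if (!pfn_valid(pfn)) {
                if (!xen_feature(XENFEAT_grant_map_identity))
                        return;                 /* cannot map it; give up */
                vaddr = xen_mm32_remap_page(handle) + offset;
                op(vaddr, len, dir);            /* clean/invalidate the cache */
                xen_mm32_unmap(vaddr - offset);
        }

With the feature gone, this fallback is removed and the !pfn_valid() case is
left as a TODO, so foreign pages get no cache maintenance on this path.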
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--  arch/arm/xen/enlighten.c    5
-rw-r--r--  arch/arm/xen/mm32.c        85
2 files changed, 1 insertion(+), 89 deletions(-)
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0e15f011f9c8..c7ca936ebd99 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
 
         xen_setup_features();
 
-        if (!xen_feature(XENFEAT_grant_map_identity)) {
-                pr_warn("Please upgrade your Xen.\n"
-                        "If your platform has any non-coherent DMA devices, they won't work properly.\n");
-        }
-
         if (xen_feature(XENFEAT_dom0))
                 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
         else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
index 3b99860fd7ae..a5a93fce913b 100644
--- a/arch/arm/xen/mm32.c
+++ b/arch/arm/xen/mm32.c
@@ -5,70 +5,6 @@
 
 #include <xen/features.h>
 
-static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
-static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
-
-static int alloc_xen_mm32_scratch_page(int cpu)
-{
-        struct page *page;
-        unsigned long virt;
-        pmd_t *pmdp;
-        pte_t *ptep;
-
-        if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
-                return 0;
-
-        page = alloc_page(GFP_KERNEL);
-        if (page == NULL) {
-                pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
-                return -ENOMEM;
-        }
-
-        virt = (unsigned long)__va(page_to_phys(page));
-        pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-        ptep = pte_offset_kernel(pmdp, virt);
-
-        per_cpu(xen_mm32_scratch_virt, cpu) = virt;
-        per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
-
-        return 0;
-}
-
-static int xen_mm32_cpu_notify(struct notifier_block *self,
-                               unsigned long action, void *hcpu)
-{
-        int cpu = (long)hcpu;
-        switch (action) {
-        case CPU_UP_PREPARE:
-                if (alloc_xen_mm32_scratch_page(cpu))
-                        return NOTIFY_BAD;
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block xen_mm32_cpu_notifier = {
-        .notifier_call = xen_mm32_cpu_notify,
-};
-
-static void* xen_mm32_remap_page(dma_addr_t handle)
-{
-        unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
-        pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
-
-        *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
-        local_flush_tlb_kernel_page(virt);
-
-        return (void*)virt;
-}
-
-static void xen_mm32_unmap(void *vaddr)
-{
-        put_cpu_var(xen_mm32_scratch_virt);
-}
-
 
 /* functions called by SWIOTLB */
 
@@ -88,13 +24,7 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 
         if (!pfn_valid(pfn))
         {
-                /* Cannot map the page, we don't know its physical address.
-                 * Return and hope for the best */
-                if (!xen_feature(XENFEAT_grant_map_identity))
-                        return;
-                vaddr = xen_mm32_remap_page(handle) + offset;
-                op(vaddr, len, dir);
-                xen_mm32_unmap(vaddr - offset);
+                /* TODO: cache flush */
         } else {
                 struct page *page = pfn_to_page(pfn);
 
@@ -181,22 +111,9 @@ void xen_dma_sync_single_for_device(struct device *hwdev,
 
 int __init xen_mm32_init(void)
 {
-        int cpu;
-
         if (!xen_initial_domain())
                 return 0;
 
-        register_cpu_notifier(&xen_mm32_cpu_notifier);
-        get_online_cpus();
-        for_each_online_cpu(cpu) {
-                if (alloc_xen_mm32_scratch_page(cpu)) {
-                        put_online_cpus();
-                        unregister_cpu_notifier(&xen_mm32_cpu_notifier);
-                        return -ENOMEM;
-                }
-        }
-        put_online_cpus();
-
         return 0;
 }
 arch_initcall(xen_mm32_init);