author		Linus Torvalds <torvalds@linux-foundation.org>	2014-09-12 20:45:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-09-12 20:45:27 -0400
commit		fc486b03cae382601b366ab460b05e1a01bf69cd (patch)
tree		4b7bcb5ddebfc24662bad6eda9b6f3c9a515df10 /arch
parent		471cff7c42b56dbbd69003c25bbd97cf6776d464 (diff)
parent		d50582e06fd5a58281151e097ff68b02ca65cf2d (diff)
Merge tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen ARM bugfix from Stefano Stabellini:
 "The patches fix the 'xen_add_mach_to_phys_entry: cannot add' bug that
  has been affecting Xen on ARM and ARM64 guests since 3.16.  They
  require a few hypervisor-side changes that just went into xen-unstable.

  A couple of days ago David sent out a pull request with a few other
  Xen fixes (it is already in master).  Sorry we didn't synchronize
  better among ourselves."

* tag 'stable/for-linus-3.17-b-rc4-arm-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/arm: remove mach_to_phys rbtree
  xen/arm: reimplement xen_dma_unmap_page & friends
  xen/arm: introduce XENFEAT_grant_map_identity
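[Editor's note: for readers unfamiliar with the bug, the reverse (mach-to-phys) rbtree removed below was keyed by mfn, but once grant mappings are involved two distinct guest pfns can be backed by the same machine frame, so the second insert hits the duplicate-key check and triggers the warning quoted above. A minimal sketch of the colliding-key situation; the values are invented for illustration:]

/* Hypothetical illustration: two grant mappings backed by the same
 * machine frame.  A tree keyed on mfn cannot hold both entries, so the
 * second insert failed with "xen_add_mach_to_phys_entry: cannot add". */
struct p2m_example {
	unsigned long pfn;	/* guest frame number */
	unsigned long mfn;	/* machine frame number */
};

static const struct p2m_example grant_maps[] = {
	{ .pfn = 0x48000, .mfn = 0x42 },	/* first mapping of the page */
	{ .pfn = 0x48001, .mfn = 0x42 },	/* same mfn again: duplicate key */
};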
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h |  25
-rw-r--r--  arch/arm/include/asm/xen/page.h          |   9
-rw-r--r--  arch/arm/xen/Makefile                    |   2
-rw-r--r--  arch/arm/xen/enlighten.c                 |   6
-rw-r--r--  arch/arm/xen/mm32.c                      | 202
-rw-r--r--  arch/arm/xen/p2m.c                       |  66
6 files changed, 217 insertions(+), 93 deletions(-)
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
-{
-	if (__generic_dma_ops(hwdev)->unmap_page)
-		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_device)
-		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
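[Editor's note: the shape of this change is that the three helpers stop being static inlines and become real functions, so their implementations (in the new mm32.c below) can consult Xen feature flags and per-cpu state. A sketch of how a swiotlb-xen-style consumer is expected to call the out-of-line helper; the caller name is invented for illustration:]

#include <asm/xen/page-coherent.h>

/* Sketch only: an invented caller showing the intended call order on the
 * unmap path (CPU-side cache maintenance before the bounce buffer is
 * torn down); swiotlb-xen is the real consumer of these helpers. */
static void example_unmap_path(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
	/* ... the actual swiotlb unmap of dev_addr would follow here ... */
}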
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
33#define INVALID_P2M_ENTRY (~0UL) 33#define INVALID_P2M_ENTRY (~0UL)
34 34
35unsigned long __pfn_to_mfn(unsigned long pfn); 35unsigned long __pfn_to_mfn(unsigned long pfn);
36unsigned long __mfn_to_pfn(unsigned long mfn);
37extern struct rb_root phys_to_mach; 36extern struct rb_root phys_to_mach;
38 37
39static inline unsigned long pfn_to_mfn(unsigned long pfn) 38static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
51 50
52static inline unsigned long mfn_to_pfn(unsigned long mfn) 51static inline unsigned long mfn_to_pfn(unsigned long mfn)
53{ 52{
54 unsigned long pfn;
55
56 if (phys_to_mach.rb_node != NULL) {
57 pfn = __mfn_to_pfn(mfn);
58 if (pfn != INVALID_P2M_ENTRY)
59 return pfn;
60 }
61
62 return mfn; 53 return mfn;
63} 54}
64 55
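[Editor's note: the asymmetry left behind here is deliberate. The forward pfn-to-mfn direction still consults the phys_to_mach tree, since grant mappings are still tracked there, while the reverse direction becomes the identity, which is always correct for an auto-translated guest that never sees real machine addresses. A simplified paraphrase of the resulting header (kernel context assumed, not standalone):]

/* Simplified contrast based on the header above: forward lookups still
 * use the rbtree, reverse lookups are now the identity. */
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);	/* tracked (grant) mappings */
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}
	return pfn;				/* untracked: identity */
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	return mfn;				/* identity, after this patch */
}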
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
+
+	if (!xen_feature(XENFEAT_grant_map_identity)) {
+		pr_warn("Please upgrade your Xen.\n"
+			"If your platform has any non-coherent DMA devices, they won't work properly.\n");
+	}
+
 	if (xen_feature(XENFEAT_dom0))
 		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 	else
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <xen/features.h>
+
+static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
+static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
+
+static int alloc_xen_mm32_scratch_page(int cpu)
+{
+	struct page *page;
+	unsigned long virt;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
+		return 0;
+
+	page = alloc_page(GFP_KERNEL);
+	if (page == NULL) {
+		pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
+		return -ENOMEM;
+	}
+
+	virt = (unsigned long)__va(page_to_phys(page));
+	pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+	ptep = pte_offset_kernel(pmdp, virt);
+
+	per_cpu(xen_mm32_scratch_virt, cpu) = virt;
+	per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
+
+	return 0;
+}
+
+static int xen_mm32_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+	switch (action) {
+	case CPU_UP_PREPARE:
+		if (alloc_xen_mm32_scratch_page(cpu))
+			return NOTIFY_BAD;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block xen_mm32_cpu_notifier = {
+	.notifier_call	= xen_mm32_cpu_notify,
+};
+
+static void* xen_mm32_remap_page(dma_addr_t handle)
+{
+	unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
+	pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
+
+	*ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
+	local_flush_tlb_kernel_page(virt);
+
+	return (void*)virt;
+}
+
+static void xen_mm32_unmap(void *vaddr)
+{
+	put_cpu_var(xen_mm32_scratch_virt);
+}
+
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+	size_t size, enum dma_data_direction dir,
+	void (*op)(const void *, size_t, int))
+{
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
+	do {
+		size_t len = left;
+		void *vaddr;
+
+		if (!pfn_valid(pfn))
+		{
+			/* Cannot map the page, we don't know its physical address.
+			 * Return and hope for the best */
+			if (!xen_feature(XENFEAT_grant_map_identity))
+				return;
+			vaddr = xen_mm32_remap_page(handle) + offset;
+			op(vaddr, len, dir);
+			xen_mm32_unmap(vaddr - offset);
+		} else {
+			struct page *page = pfn_to_page(pfn);
+
+			if (PageHighMem(page)) {
+				if (len + offset > PAGE_SIZE)
+					len = PAGE_SIZE - offset;
+
+				if (cache_is_vipt_nonaliasing()) {
+					vaddr = kmap_atomic(page);
+					op(vaddr + offset, len, dir);
+					kunmap_atomic(vaddr);
+				} else {
+					vaddr = kmap_high_get(page);
+					if (vaddr) {
+						op(vaddr + offset, len, dir);
+						kunmap_high(page);
+					}
+				}
+			} else {
+				vaddr = page_address(page) + offset;
+				op(vaddr, len, dir);
+			}
+		}
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	/* Cannot use __dma_page_dev_to_cpu because we don't have a
+	 * struct page for handle */
+
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(handle, handle + size);
+
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+
+	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
+
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(handle, handle + size);
+	} else {
+		outer_clean_range(handle, handle + size);
+	}
+}
+
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+
+{
+	if (!__generic_dma_ops(hwdev)->unmap_page)
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
+		return;
+	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
+		return;
+	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
+int __init xen_mm32_init(void)
+{
+	int cpu;
+
+	if (!xen_initial_domain())
+		return 0;
+
+	register_cpu_notifier(&xen_mm32_cpu_notifier);
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (alloc_xen_mm32_scratch_page(cpu)) {
+			put_online_cpus();
+			unregister_cpu_notifier(&xen_mm32_cpu_notifier);
+			return -ENOMEM;
+		}
+	}
+	put_online_cpus();
+
+	return 0;
+}
+arch_initcall(xen_mm32_init);
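[Editor's note: the core of mm32.c is dma_cache_maint(), which walks the DMA range page by page; on the highmem path each operation is clamped at a page boundary. A standalone userspace demonstration of that arithmetic, with the bus address and size invented for illustration:]

/* Demo of dma_cache_maint()'s page-walk arithmetic (highmem path): a
 * 0x1000-byte range starting 0x800 into a page splits into two per-page
 * operations. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long bus = 0x80003800UL;		/* made-up device address */
	unsigned long handle = bus & PAGE_MASK;		/* as passed by the callers */
	unsigned long offset = bus & ~PAGE_MASK;	/* 0x800 */
	unsigned long pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	size_t left = 0x1000;

	offset %= PAGE_SIZE;
	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;	/* clamp at page end */
		printf("op(pfn=0x%lx, offset=0x%lx, len=0x%zx)\n",
		       pfn, offset, len);
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
	return 0;
}

/* Prints:
 *   op(pfn=0x80003, offset=0x800, len=0x800)
 *   op(pfn=0x80004, offset=0x0, len=0x800)
 */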
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf4427817..054857776254 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry {
 	unsigned long pfn;
 	unsigned long mfn;
 	unsigned long nr_pages;
-	struct rb_node rbnode_mach;
 	struct rb_node rbnode_phys;
 };
 
 static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
 EXPORT_SYMBOL_GPL(phys_to_mach);
-static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 {
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
 		parent = *link;
 		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
 
-		if (new->mfn == entry->mfn)
-			goto err_out;
 		if (new->pfn == entry->pfn)
 			goto err_out;
 
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
 
-static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
-{
-	struct rb_node **link = &mach_to_phys.rb_node;
-	struct rb_node *parent = NULL;
-	struct xen_p2m_entry *entry;
-	int rc = 0;
-
-	while (*link) {
-		parent = *link;
-		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
-
-		if (new->mfn == entry->mfn)
-			goto err_out;
-		if (new->pfn == entry->pfn)
-			goto err_out;
-
-		if (new->mfn < entry->mfn)
-			link = &(*link)->rb_left;
-		else
-			link = &(*link)->rb_right;
-	}
-	rb_link_node(&new->rbnode_mach, parent, link);
-	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
-	goto out;
-
-err_out:
-	rc = -EINVAL;
-	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
-			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
-out:
-	return rc;
-}
-
-unsigned long __mfn_to_pfn(unsigned long mfn)
-{
-	struct rb_node *n = mach_to_phys.rb_node;
-	struct xen_p2m_entry *entry;
-	unsigned long irqflags;
-
-	read_lock_irqsave(&p2m_lock, irqflags);
-	while (n) {
-		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
-		if (entry->mfn <= mfn &&
-				entry->mfn + entry->nr_pages > mfn) {
-			read_unlock_irqrestore(&p2m_lock, irqflags);
-			return entry->pfn + (mfn - entry->mfn);
-		}
-		if (mfn < entry->mfn)
-			n = n->rb_left;
-		else
-			n = n->rb_right;
-	}
-	read_unlock_irqrestore(&p2m_lock, irqflags);
-
-	return INVALID_P2M_ENTRY;
-}
-EXPORT_SYMBOL_GPL(__mfn_to_pfn);
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 			    struct gnttab_map_grant_ref *kmap_ops,
 			    struct page **pages, unsigned int count)
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 		p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 		if (p2m_entry->pfn <= pfn &&
 				p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
-			rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
 			rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
 			write_unlock_irqrestore(&p2m_lock, irqflags);
 			kfree(p2m_entry);
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 	p2m_entry->mfn = mfn;
 
 	write_lock_irqsave(&p2m_lock, irqflags);
-	if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
-	    (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
+	if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
 		write_unlock_irqrestore(&p2m_lock, irqflags);
 		return false;
 	}
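[Editor's note: besides dropping the second insert, the final hunk fixes an operator-precedence bug. In C, '<' binds tighter than '=', so the old form '(rc = fn() < 0)' stored the result of the comparison in rc and lost the actual error code. A minimal standalone demonstration:]

/* Demo of the precedence bug fixed in the last hunk above. */
#include <stdio.h>

static int fails(void)
{
	return -22;	/* e.g. -EINVAL */
}

int main(void)
{
	int rc;

	rc = (fails() < 0);		/* old form: rc == 1, error code lost */
	printf("old form: rc = %d\n", rc);

	if ((rc = fails()) < 0)		/* fixed form: rc == -22 */
		printf("fixed form: rc = %d\n", rc);
	return 0;
}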