author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-11-08 15:36:09 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-11-08 16:10:48 -0500
commit		e1d8f62ad49a6a7068aa1bdc30252911d71c4dc4 (patch)
tree		e7ad9bf58ba9b58bf48ff59283ba0c27b03969d1 /arch/arm/xen
parent		bad97817dece759dd6c0b24f862b7d0ed588edda (diff)
parent		15177608c703e7b4aa29aa7c93b31001effe504c (diff)
Merge remote-tracking branch 'stefano/swiotlb-xen-9.1' into stable/for-linus-3.13
* stefano/swiotlb-xen-9.1:
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  swiotlb-xen: introduce xen_swiotlb_set_dma_mask
  xen/arm,arm64: enable SWIOTLB_XEN
  xen: make xen_create_contiguous_region return the dma address
  xen/x86: allow __set_phys_to_machine for autotranslate guests
  arm/xen,arm64/xen: introduce p2m
  arm64: define DMA_ERROR_CODE
  arm: make SWIOTLB available

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Conflicts:
	arch/arm/include/asm/dma-mapping.h
	drivers/xen/swiotlb-xen.c

[Conflicts arose because "arm: make SWIOTLB available" v8 was in Stefano's branch, while I had v9 plus an Ack from Russell. I also fixed up whitespace issues.]
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--	arch/arm/xen/Makefile	2
-rw-r--r--	arch/arm/xen/mm.c	65
-rw-r--r--	arch/arm/xen/p2m.c	208
3 files changed, 274 insertions(+), 1 deletion(-)
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 43841033afd3..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y		:= enlighten.o hypercall.o grant-table.o
+obj-y		:= enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
new file mode 100644
index 000000000000..b0e77de99148
--- /dev/null
+++ b/arch/arm/xen/mm.c
@@ -0,0 +1,65 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

int __init xen_mm_init(void)
{
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;
	return 0;
}
arch_initcall(xen_mm_init);
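
For context, the xen_dma_ops pointer installed by xen_mm_init() above is intended to be picked up by the ARM dma-mapping hook when running as the initial domain (see "arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain" in this merge). A simplified sketch of that consumer side, not part of this file and paraphrased from the dma-mapping change:

/* Sketch only: per-device ops win, otherwise dom0 gets the Xen
 * swiotlb ops installed by xen_mm_init(), and everything else
 * falls back to the default ARM DMA ops.
 */
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return __generic_dma_ops(dev);
}
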
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
new file mode 100644
index 000000000000..23732cdff551
--- /dev/null
+++ b/arch/arm/xen/p2m.c
@@ -0,0 +1,208 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

struct xen_p2m_entry {
	unsigned long pfn;
	unsigned long mfn;
	unsigned long nr_pages;
	struct rb_node rbnode_mach;
	struct rb_node rbnode_phys;
};

rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
static struct rb_root mach_to_phys = RB_ROOT;

static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &phys_to_mach.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->pfn < entry->pfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_phys, parent, link);
	rb_insert_color(&new->rbnode_phys, &phys_to_mach);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

unsigned long __pfn_to_mfn(unsigned long pfn)
{
	struct rb_node *n = phys_to_mach.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
		if (entry->pfn <= pfn &&
				entry->pfn + entry->nr_pages > pfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->mfn + (pfn - entry->pfn);
		}
		if (pfn < entry->pfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
{
	struct rb_node **link = &mach_to_phys.rb_node;
	struct rb_node *parent = NULL;
	struct xen_p2m_entry *entry;
	int rc = 0;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);

		if (new->mfn == entry->mfn)
			goto err_out;
		if (new->pfn == entry->pfn)
			goto err_out;

		if (new->mfn < entry->mfn)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->rbnode_mach, parent, link);
	rb_insert_color(&new->rbnode_mach, &mach_to_phys);
	goto out;

err_out:
	rc = -EINVAL;
	pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
			__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
	return rc;
}

unsigned long __mfn_to_pfn(unsigned long mfn)
{
	struct rb_node *n = mach_to_phys.rb_node;
	struct xen_p2m_entry *entry;
	unsigned long irqflags;

	read_lock_irqsave(&p2m_lock, irqflags);
	while (n) {
		entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
		if (entry->mfn <= mfn &&
				entry->mfn + entry->nr_pages > mfn) {
			read_unlock_irqrestore(&p2m_lock, irqflags);
			return entry->pfn + (mfn - entry->mfn);
		}
		if (mfn < entry->mfn)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	read_unlock_irqrestore(&p2m_lock, irqflags);

	return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);

bool __set_phys_to_machine_multi(unsigned long pfn,
		unsigned long mfn, unsigned long nr_pages)
{
	int rc;
	unsigned long irqflags;
	struct xen_p2m_entry *p2m_entry;
	struct rb_node *n = phys_to_mach.rb_node;

	if (mfn == INVALID_P2M_ENTRY) {
		write_lock_irqsave(&p2m_lock, irqflags);
		while (n) {
			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
			if (p2m_entry->pfn <= pfn &&
					p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
				rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
				rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
				write_unlock_irqrestore(&p2m_lock, irqflags);
				kfree(p2m_entry);
				return true;
			}
			if (pfn < p2m_entry->pfn)
				n = n->rb_left;
			else
				n = n->rb_right;
		}
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return true;
	}

	p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
	if (!p2m_entry) {
		pr_warn("cannot allocate xen_p2m_entry\n");
		return false;
	}
	p2m_entry->pfn = pfn;
	p2m_entry->nr_pages = nr_pages;
	p2m_entry->mfn = mfn;

	write_lock_irqsave(&p2m_lock, irqflags);
	if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0 ||
	    (rc = xen_add_mach_to_phys_entry(p2m_entry)) < 0) {
		write_unlock_irqrestore(&p2m_lock, irqflags);
		return false;
	}
	write_unlock_irqrestore(&p2m_lock, irqflags);
	return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);
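
To make the intended use of the new p2m interface concrete, here is a minimal usage sketch; the caller and values are hypothetical, and it assumes the declarations this merge adds to asm/xen/page.h:

/* Hypothetical caller, for illustration only: not part of this patch. */
static void p2m_example(unsigned long pfn, unsigned long mfn)
{
	/* Record a 4-page pfn -> mfn range; fails on overlap or allocation failure. */
	if (!__set_phys_to_machine_multi(pfn, mfn, 4))
		return;

	/* Lookups resolve any page inside the recorded range. */
	pr_info("pfn+2 -> mfn %lx\n", __pfn_to_mfn(pfn + 2));	/* mfn + 2 */
	pr_info("mfn+2 -> pfn %lx\n", __mfn_to_pfn(mfn + 2));	/* pfn + 2 */

	/* Storing INVALID_P2M_ENTRY erases the entry covering pfn. */
	__set_phys_to_machine_multi(pfn, INVALID_P2M_ENTRY, 4);
}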