Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--  drivers/xen/swiotlb-xen.c  101
1 file changed, 75 insertions, 26 deletions
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2e74dba6a04d..58db6df866ef 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -144,31 +144,72 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 	} while (i < nslabs);
 	return 0;
 }
+static unsigned long xen_set_nslabs(unsigned long nr_tbl)
+{
+	if (!nr_tbl) {
+		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+	} else
+		xen_io_tlb_nslabs = nr_tbl;
+
+	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
+}
+
+enum xen_swiotlb_err {
+	XEN_SWIOTLB_UNKNOWN = 0,
+	XEN_SWIOTLB_ENOMEM,
+	XEN_SWIOTLB_EFIXUP
+};
 
-void __init xen_swiotlb_init(int verbose)
+static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 {
-	unsigned long bytes;
+	switch (err) {
+	case XEN_SWIOTLB_ENOMEM:
+		return "Cannot allocate Xen-SWIOTLB buffer\n";
+	case XEN_SWIOTLB_EFIXUP:
+		return "Failed to get contiguous memory for DMA from Xen!\n"\
+		    "You either: don't have the permissions, do not have"\
+		    " enough free memory under 4GB, or the hypervisor memory"\
+		    " is too fragmented!";
+	default:
+		break;
+	}
+	return "";
+}
+int __ref xen_swiotlb_init(int verbose, bool early)
+{
+	unsigned long bytes, order;
 	int rc = -ENOMEM;
-	unsigned long nr_tbl;
-	char *m = NULL;
+	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 	unsigned int repeat = 3;
 
-	nr_tbl = swiotlb_nr_tbl();
-	if (nr_tbl)
-		xen_io_tlb_nslabs = nr_tbl;
-	else {
-		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
+	xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
-	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
-
+	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	if (early)
+		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	else {
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+			if (xen_io_tlb_start)
+				break;
+			order--;
+		}
+		if (order != get_order(bytes)) {
+			pr_warn("Warning: only able to allocate %ld MB "
+				"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
+			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+		}
+	}
 	if (!xen_io_tlb_start) {
-		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+		m_ret = XEN_SWIOTLB_ENOMEM;
 		goto error;
 	}
 	xen_io_tlb_end = xen_io_tlb_start + bytes;
@@ -179,17 +220,22 @@ retry:
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
-		m = "Failed to get contiguous memory for DMA from Xen!\n"\
-		    "You either: don't have the permissions, do not have"\
-		    " enough free memory under 4GB, or the hypervisor memory"\
-		    "is too fragmented!";
+		if (early)
+			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		else {
+			free_pages((unsigned long)xen_io_tlb_start, order);
+			xen_io_tlb_start = NULL;
+		}
+		m_ret = XEN_SWIOTLB_EFIXUP;
 		goto error;
 	}
 	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
-	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
-
-	return;
+	if (early) {
+		swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+		rc = 0;
+	} else
+		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+	return rc;
 error:
 	if (repeat--) {
 		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
@@ -198,10 +244,13 @@ error:
 		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
 		goto retry;
 	}
-	xen_raw_printk("%s (rc:%d)", m, rc);
-	panic("%s (rc:%d)", m, rc);
+	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	if (early)
+		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	else
+		free_pages((unsigned long)xen_io_tlb_start, order);
+	return rc;
 }
-
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
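
The new bool early argument is what lets one routine back both configurations: at boot it still takes the buffer from bootmem and panics on failure, while after boot it falls back to __get_free_pages() and reports the error to its caller instead. The sketch below is only an illustration of how the two paths could be driven; the wrapper names are invented for the example, and the real callers and the updated prototype live in the companion arch/x86 and header changes, not in this hunk.

/*
 * Illustrative sketch only -- not part of this patch.  Shows how the
 * reworked xen_swiotlb_init(verbose, early) could be called from both
 * initialisation paths; the example_* names are made up.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <xen/swiotlb-xen.h>	/* assumes the companion header change
				 * exporting the new prototype */

/* Boot-time path: bootmem is still available, so xen_swiotlb_init()
 * allocates with alloc_bootmem_pages() and panics itself on failure. */
static void __init example_swiotlb_init_early(void)
{
	xen_swiotlb_init(1 /* verbose */, true /* early */);
}

/* Post-boot path (e.g. a PV PCI device appearing after boot): the buffer
 * now comes from __get_free_pages(), and failure is returned, not fatal. */
static int example_swiotlb_init_late(void)
{
	int rc = xen_swiotlb_init(1 /* verbose */, false /* early */);

	if (rc)
		pr_err("xen-swiotlb: late initialisation failed (rc:%d)\n", rc);
	return rc;
}

The late path deliberately avoids the panic: a PV PCI device can show up long after boot, and failing that hot-add is preferable to taking down a running machine.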