Diffstat (limited to 'drivers/xen/swiotlb-xen.c')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 53 insertions(+), 17 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 6e8c15a23201..c984768d98ca 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
 #include <xen/swiotlb-xen.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/hvc-console.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
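The added <xen/hvc-console.h> include provides xen_raw_printk(), which the reworked error path below uses to push the failure message out through the hypervisor console even when the regular console is not up yet.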
@@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 void __init xen_swiotlb_init(int verbose)
 {
 	unsigned long bytes;
-	int rc;
+	int rc = -ENOMEM;
 	unsigned long nr_tbl;
+	char *m = NULL;
+	unsigned int repeat = 3;
 
 	nr_tbl = swioltb_nr_tbl();
 	if (nr_tbl)
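rc is now pre-set to -ENOMEM so that an alloc_bootmem() failure, which never assigns rc, still reaches the error path with a meaningful code; m carries the message for the eventual panic, and repeat caps the shrink-and-retry attempts at three.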
@@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)
 		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
 		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
-
+retry:
 	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/*
 	 * Get IO TLB memory from any location.
 	 */
 	xen_io_tlb_start = alloc_bootmem(bytes);
-	if (!xen_io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
-
+	if (!xen_io_tlb_start) {
+		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+		goto error;
+	}
 	xen_io_tlb_end = xen_io_tlb_start + bytes;
 	/*
 	 * And replace that memory with pages under 4GB.
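A retry: label now re-enters the function at the point where bytes is recomputed from xen_io_tlb_nslabs, and an allocation failure jumps to the shared error path instead of panicking on the spot.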
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)
 	rc = xen_swiotlb_fixup(xen_io_tlb_start,
 			       bytes,
 			       xen_io_tlb_nslabs);
-	if (rc)
+	if (rc) {
+		free_bootmem(__pa(xen_io_tlb_start), bytes);
+		m = "Failed to get contiguous memory for DMA from Xen!\n"\
+		    "You either: don't have the permissions, do not have"\
+		    " enough free memory under 4GB, or the hypervisor memory"\
+		    "is too fragmented!";
 		goto error;
-
+	}
 	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
 	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
 
 	return;
 error:
-	panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
-	      "We either don't have the permission or you do not have enough"\
-	      "free memory under 4GB!\n", rc);
+	if (repeat--) {
+		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+					(xen_io_tlb_nslabs >> 1));
+		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
+		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+		goto retry;
+	}
+	xen_raw_printk("%s (rc:%d)", m, rc);
+	panic("%s (rc:%d)", m, rc);
 }
 
 void *
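On each pass through the error path the table size is halved, with a floor of 1024 slabs (2 MB), before retrying the allocation. A minimal userspace sketch of that back-off arithmetic, assuming the then-current IO_TLB_SHIFT of 11 (2 KB per slab); main() and the printf() are illustration only:

#include <stdio.h>

#define IO_TLB_SHIFT 11	/* 2 KB per slab, as in this era's swiotlb */

int main(void)
{
	/* Default table: 64 MB worth of slabs, as in xen_swiotlb_init(). */
	unsigned long nslabs = (64 * 1024 * 1024) >> IO_TLB_SHIFT;
	unsigned int repeat = 3;

	while (repeat--) {
		/* Same floor as the patch: never below 1024 slabs (2 MB). */
		nslabs = (nslabs >> 1) > 1024UL ? (nslabs >> 1) : 1024UL;
		printf("Xen-SWIOTLB: Lowering to %luMB\n",
		       (nslabs << IO_TLB_SHIFT) >> 20);
	}
	return 0;
}

Starting from the 64 MB default this yields 32 MB, 16 MB and 8 MB before the final xen_raw_printk() and panic().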
@@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
 	unsigned long vstart;
+	phys_addr_t phys;
+	dma_addr_t dev_addr;
 
 	/*
 	 * Ignore region specifiers - the kernel's ideas of
@@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	vstart = __get_free_pages(flags, order);
 	ret = (void *)vstart;
 
+	if (!ret)
+		return ret;
+
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;
 
-	if (ret) {
+	phys = virt_to_phys(ret);
+	dev_addr = xen_phys_to_bus(phys);
+	if (((dev_addr + size - 1 <= dma_mask)) &&
+	    !range_straddles_page_boundary(phys, size))
+		*dma_handle = dev_addr;
+	else {
 		if (xen_create_contiguous_region(vstart, order,
 						 fls64(dma_mask)) != 0) {
 			free_pages(vstart, order);
 			return NULL;
 		}
-		memset(ret, 0, size);
 		*dma_handle = virt_to_machine(ret).maddr;
 	}
+	memset(ret, 0, size);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
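xen_swiotlb_alloc_coherent() now returns NULL straight away when __get_free_pages() fails, and it only asks the hypervisor to exchange the backing frames (xen_create_contiguous_region()) when the buffer's natural machine address is not already usable; moving the memset() after the branch zeroes the buffer on both paths. A rough userspace sketch of the new usability test, with hypothetical *_stub helpers standing in for the kernel's xen_phys_to_bus() and range_straddles_page_boundary() (the real latter also verifies that the underlying MFNs are contiguous):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define XEN_PAGE_SIZE 4096u

/* Stub: the kernel helper translates a pseudo-physical address to a
 * machine/bus address via the P2M table; identity mapping here. */
static uint64_t xen_phys_to_bus_stub(uint64_t phys)
{
	return phys;
}

/* Stub: simplified to the page-boundary part of the kernel check. */
static bool range_straddles_page_boundary_stub(uint64_t phys, size_t size)
{
	return (phys & (XEN_PAGE_SIZE - 1)) + size > XEN_PAGE_SIZE;
}

/* True when the freshly allocated buffer can be handed to the device
 * as-is, i.e. no xen_create_contiguous_region() call is needed. */
static bool buffer_dma_ok(uint64_t phys, size_t size, uint64_t dma_mask)
{
	uint64_t dev_addr = xen_phys_to_bus_stub(phys);

	return (dev_addr + size - 1 <= dma_mask) &&
	       !range_straddles_page_boundary_stub(phys, size);
}

int main(void)
{
	/* One page-aligned page under a 32-bit mask: usable as-is. */
	return buffer_dma_ok(0x100000, 4096, UINT32_MAX) ? 0 : 1;
}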
@@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dev_addr)
 {
 	int order = get_order(size);
+	phys_addr_t phys;
+	u64 dma_mask = DMA_BIT_MASK(32);
 
 	if (dma_release_from_coherent(hwdev, order, vaddr))
 		return;
 
-	xen_destroy_contiguous_region((unsigned long)vaddr, order);
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
+
+	phys = virt_to_phys(vaddr);
+
+	if (((dev_addr + size - 1 > dma_mask)) ||
+	    range_straddles_page_boundary(phys, size))
+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
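The free side now mirrors that test: xen_destroy_contiguous_region() is called only for buffers that would have failed the check at allocation time, i.e. the ones that actually went through xen_create_contiguous_region(); everything else is simply returned with free_pages().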
@@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
-
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = 0;
+	}
 	return dev_addr;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
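Finally, a bounce buffer that turns out not to be reachable by the device is no longer fatal: the mapping is torn down again with swiotlb_tbl_unmap_single() and 0 is returned, so the caller sees a failed mapping instead of a system panic.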