author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-08-25 16:13:54 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-08-26 12:10:42 -0400
commit	6810df88dcfc22de267caf23eb072ffb97b3c411 (patch)
tree	5481262624654fc570be5aa7e1a5513687df263d /drivers/xen
parent	12e13ac84ca70e6641a4750e9317aa2d2c1f6f50 (diff)
xen-swiotlb: When doing coherent alloc/dealloc check before swizzling the MFNs.
The process to swizzle a Machine Frame Number (MFN) is not always necessary, especially when we already know that we do not have to do it. In this patch we check the bus address of the allocated page(s) against the device's coherent DMA mask, and also whether the requested page(s) are contiguous. If both checks pass, we just return the bus address without doing the memory swizzle.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
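To see the new fast path in isolation, here is a condensed sketch of the check the patch adds. The helper name xen_coherent_fast_path() is hypothetical, used for illustration only (the patch open-codes this logic inside xen_swiotlb_alloc_coherent()); xen_phys_to_bus() and range_straddles_page_boundary() are the existing helpers in this file:

/*
 * Sketch: can freshly allocated pages be handed to the device as-is?
 * If the bus address of the last byte fits under the device's coherent
 * DMA mask and the underlying frames are contiguous, the MFN exchange
 * (the "swizzle") can be skipped entirely.
 */
static bool xen_coherent_fast_path(void *ret, size_t size, u64 dma_mask,
				   dma_addr_t *dev_addr)
{
	phys_addr_t phys = virt_to_phys(ret);

	*dev_addr = xen_phys_to_bus(phys);
	return (*dev_addr + size - 1 <= dma_mask) &&
	       !range_straddles_page_boundary(phys, size);
}

When this returns true, the bus address is stored into *dma_handle directly; otherwise the allocation falls back to xen_create_contiguous_region() as before.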
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/swiotlb-xen.c	28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 0408f3225722..c984768d98ca 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -209,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
 	unsigned long vstart;
+	phys_addr_t phys;
+	dma_addr_t dev_addr;
 
 	/*
 	 * Ignore region specifiers - the kernel's ideas of
@@ -224,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	vstart = __get_free_pages(flags, order);
 	ret = (void *)vstart;
 
+	if (!ret)
+		return ret;
+
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;
 
-	if (ret) {
+	phys = virt_to_phys(ret);
+	dev_addr = xen_phys_to_bus(phys);
+	if (((dev_addr + size - 1 <= dma_mask)) &&
+	    !range_straddles_page_boundary(phys, size))
+		*dma_handle = dev_addr;
+	else {
 		if (xen_create_contiguous_region(vstart, order,
 						 fls64(dma_mask)) != 0) {
 			free_pages(vstart, order);
 			return NULL;
 		}
-		memset(ret, 0, size);
 		*dma_handle = virt_to_machine(ret).maddr;
 	}
+	memset(ret, 0, size);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -245,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dev_addr)
 {
 	int order = get_order(size);
+	phys_addr_t phys;
+	u64 dma_mask = DMA_BIT_MASK(32);
 
 	if (dma_release_from_coherent(hwdev, order, vaddr))
 		return;
 
-	xen_destroy_contiguous_region((unsigned long)vaddr, order);
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
+
+	phys = virt_to_phys(vaddr);
+
+	if (((dev_addr + size - 1 > dma_mask)) ||
+	    range_straddles_page_boundary(phys, size))
+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
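Two details of the patch are easy to miss. The memset() moves below the if/else so the buffer is zeroed on both the fast path and the fallback path, not only after a successful exchange. And the free path applies the inverted test: xen_destroy_contiguous_region() is only called when the bus address or the page-boundary check indicates the region was exchanged at allocation time, keeping xen_swiotlb_free_coherent() symmetric with the allocation side.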