author    Linus Torvalds <torvalds@linux-foundation.org>    2012-12-16 20:39:14 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-16 20:39:14 -0500
commit    9b690c3d56ce15dd265b6398f9d8d58c29c17032 (patch)
tree      56477d1f4e596011f17d1c64e8597613330e5439 /drivers/xen
parent    36cd5c19c3fe8291fac45a262c44c00bd14b531a (diff)
parent    af51a9f1848ff50079a10def56a2c064f326af22 (diff)
Merge tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
Pull swiotlb update from Konrad Rzeszutek Wilk:
 "Feature:

   - Use DMA addresses instead of the virt_to_phys and vice versa
     functions: remove the multitude of phys_to_virt/virt_to_phys
     calls and instead operate on physical rather than virtual
     addresses in many of the internal functions.  This provides a
     speed-up in interrupt handlers that do DMA operations and use
     SWIOTLB."

* tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: Do not export swiotlb_bounce since there are no external consumers
  swiotlb: Use physical addresses instead of virtual in swiotlb_tbl_sync_single
  swiotlb: Use physical addresses for swiotlb_tbl_unmap_single
  swiotlb: Return physical addresses when calling swiotlb_tbl_map_single
  swiotlb: Make io_tlb_overflow_buffer a physical address
  swiotlb: Make io_tlb_start a physical address instead of a virtual one
  swiotlb: Make io_tlb_end a physical address instead of a virtual one
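For context, the heart of the series is a calling-convention change: swiotlb_tbl_map_single() now returns a physical address within the bounce buffer, signalling failure with the SWIOTLB_MAP_ERROR sentinel rather than a NULL pointer. A minimal sketch of the post-series declarations, paraphrased from the 3.8-era include/linux/swiotlb.h (the comments are editorial, not from the tree):

    /*
     * Sketch of the post-series swiotlb calling convention: the
     * bounce-buffer routines deal in physical addresses end to end,
     * so no phys_to_virt/virt_to_phys round trips are needed.
     */
    #define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

    extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                              dma_addr_t tbl_dma_addr,
                                              phys_addr_t phys, size_t size,
                                              enum dma_data_direction dir);

    /* Unmap/sync likewise take the physical address returned above. */
    extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                         phys_addr_t tlb_addr, size_t size,
                                         enum dma_data_direction dir);

An explicit sentinel is needed because a NULL-style test does not work for phys_addr_t: physical address 0 can be a valid mapping, which is why the diff below replaces "if (!map)" with "if (map == SWIOTLB_MAP_ERROR)".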
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58db6df866ef..af47e7594460 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -338,9 +338,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				enum dma_data_direction dir,
 				struct dma_attrs *attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
+	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = xen_phys_to_bus(phys);
-	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -356,10 +355,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
-	if (!map)
+	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
-	dev_addr = xen_virt_to_bus(map);
+	dev_addr = xen_phys_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -389,7 +388,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
-		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
 		return;
 	}
 
@@ -434,8 +433,7 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
-		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
-					target);
+		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 		return;
 	}
 
@@ -494,11 +492,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
-			void *map = swiotlb_tbl_map_single(hwdev,
-							   start_dma_addr,
-							   sg_phys(sg),
-							   sg->length, dir);
-			if (!map) {
+			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+								 start_dma_addr,
+								 sg_phys(sg),
+								 sg->length,
+								 dir);
+			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +505,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				sgl[0].dma_length = 0;
 				return DMA_ERROR_CODE;
 			}
-			sg->dma_address = xen_virt_to_bus(map);
+			sg->dma_address = xen_phys_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
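Both call sites in this file now follow the same pattern, consolidated in the hypothetical helper below (map_one is an illustrative name, not a function from this commit; the body is the logic the hunks above adopt):

    /*
     * Illustrative sketch of the bounce-buffer mapping pattern after
     * this merge: keep the swiotlb slot as a phys_addr_t throughout,
     * test against the sentinel, and translate phys -> bus only once.
     */
    static dma_addr_t map_one(struct device *dev, dma_addr_t start_dma_addr,
                              phys_addr_t phys, size_t size,
                              enum dma_data_direction dir)
    {
            phys_addr_t map;

            map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
            if (map == SWIOTLB_MAP_ERROR)   /* phys 0 may be valid, so no !map */
                    return DMA_ERROR_CODE;

            return xen_phys_to_bus(map);    /* was xen_virt_to_bus(map) */
    }

Dropping the intermediate virtual address is also where the advertised speed-up comes from: the unmap and sync paths no longer call phys_to_virt() on every operation, which matters in interrupt handlers that bounce DMA through SWIOTLB.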