Diffstat (limited to 'drivers/xen')

-rw-r--r--   drivers/xen/swiotlb-xen.c   196

1 file changed, 59 insertions(+), 137 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 877baf2a94f4..5dcb06fe9667 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         if (dma_capable(dev, dev_addr, size) &&
             !range_straddles_page_boundary(phys, size) &&
             !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-            (swiotlb_force != SWIOTLB_FORCE)) {
-                /* we are not interested in the dma_addr returned by
-                 * xen_dma_map_page, only in the potential cache flushes executed
-                 * by the function. */
-                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-                return dev_addr;
-        }
+            swiotlb_force != SWIOTLB_FORCE)
+                goto done;
 
         /*
          * Oh well, have to allocate and map a bounce buffer.
@@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                 return DMA_MAPPING_ERROR;
 
         dev_addr = xen_phys_to_bus(map);
-        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (dma_capable(dev, dev_addr, size))
-                return dev_addr;
-
-        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-        return DMA_MAPPING_ERROR;
+        if (unlikely(!dma_capable(dev, dev_addr, size))) {
+                swiotlb_tbl_unmap_single(dev, map, size, dir,
+                                attrs | DMA_ATTR_SKIP_CPU_SYNC);
+                return DMA_MAPPING_ERROR;
+        }
+
+        page = pfn_to_page(map >> PAGE_SHIFT);
+        offset = map & ~PAGE_MASK;
+done:
+        /*
+         * we are not interested in the dma_addr returned by xen_dma_map_page,
+         * only in the potential cache flushes executed by the function.
+         */
+        xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+        return dev_addr;
 }
 
 /*
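With the two hunks above, both the direct-mapping path and the bounce-buffer path of xen_swiotlb_map_page() now leave through the single done: label, so the cache-maintenance call to xen_dma_map_page() appears exactly once. For orientation only (not part of the patch; dev, page, offset and len below are made-up placeholders), a driver reaches this code through the generic streaming-DMA API roughly like this:

        /* Illustrative sketch, not kernel source: dma_map_page() dispatches to
         * xen_swiotlb_map_page() when xen_swiotlb_dma_ops is installed. */
        dma_addr_t handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;         /* e.g. the bounce buffer was full */
        /* ... program "handle" into the device, let it DMA ... */
        dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);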
@@ -455,48 +456,28 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
         xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
 static void
-xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                        size_t size, enum dma_data_direction dir,
-                        enum dma_sync_target target)
+xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
+                size_t size, enum dma_data_direction dir)
 {
-        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
-
-        BUG_ON(dir == DMA_NONE);
-
-        if (target == SYNC_FOR_CPU)
-                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
+        phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-        /* NOTE: We use dev_addr here, not paddr! */
-        if (is_xen_swiotlb_buffer(dev_addr))
-                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+        xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
 
-        if (target == SYNC_FOR_DEVICE)
-                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
+        if (is_xen_swiotlb_buffer(dma_addr))
+                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
 }
 
-void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+static void
+xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                 size_t size, enum dma_data_direction dir)
 {
-        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
+        phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-                        size_t size, enum dma_data_direction dir)
-{
-        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+        if (is_xen_swiotlb_buffer(dma_addr))
+                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+        xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
 }
 
 /*
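The hunk above splits the old xen_swiotlb_sync_single() helper into separate for-cpu and for-device implementations and drops the long comment about buffer ownership. The protocol that comment described is the standard streaming-DMA one; a hedged driver-side sketch (handle and len are placeholders, not taken from the patch):

        /* Illustrative sketch of the ownership hand-off the removed comment
         * described; these are the generic DMA API entry points. */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read the buffer without tearing down the mapping ... */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
        /* ... the device owns the buffer again and may DMA into it ... */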
@@ -504,9 +485,8 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 static void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-                           int nelems, enum dma_data_direction dir,
-                           unsigned long attrs)
+xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+                enum dma_data_direction dir, unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -518,26 +498,9 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 }
 
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above xen_swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
- * same here.
- */
 static int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-                         int nelems, enum dma_data_direction dir,
-                         unsigned long attrs)
+xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
+                enum dma_data_direction dir, unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -545,85 +508,44 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
         BUG_ON(dir == DMA_NONE);
 
         for_each_sg(sgl, sg, nelems, i) {
-                phys_addr_t paddr = sg_phys(sg);
-                dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-
-                if (swiotlb_force == SWIOTLB_FORCE ||
-                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
-                    !dma_capable(hwdev, dev_addr, sg->length) ||
-                    range_straddles_page_boundary(paddr, sg->length)) {
-                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
-                                                                 start_dma_addr,
-                                                                 sg_phys(sg),
-                                                                 sg->length,
-                                                                 dir, attrs);
-                        if (map == DMA_MAPPING_ERROR) {
-                                dev_warn(hwdev, "swiotlb buffer is full\n");
-                                /* Don't panic here, we expect map_sg users
-                                   to do proper error handling. */
-                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
-                                                           attrs);
-                                sg_dma_len(sgl) = 0;
-                                return 0;
-                        }
-                        dev_addr = xen_phys_to_bus(map);
-                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
-                                                dev_addr,
-                                                map & ~PAGE_MASK,
-                                                sg->length,
-                                                dir,
-                                                attrs);
-                        sg->dma_address = dev_addr;
-                } else {
-                        /* we are not interested in the dma_addr returned by
-                         * xen_dma_map_page, only in the potential cache flushes executed
-                         * by the function. */
-                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
-                                                dev_addr,
-                                                paddr & ~PAGE_MASK,
-                                                sg->length,
-                                                dir,
-                                                attrs);
-                        sg->dma_address = dev_addr;
-                }
+                sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
+                                sg->offset, sg->length, dir, attrs);
+                if (sg->dma_address == DMA_MAPPING_ERROR)
+                        goto out_unmap;
                 sg_dma_len(sg) = sg->length;
         }
+
         return nelems;
+out_unmap:
+        xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+        sg_dma_len(sgl) = 0;
+        return 0;
 }
 
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
 static void
-xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-                    int nelems, enum dma_data_direction dir,
-                    enum dma_sync_target target)
+xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+                            int nelems, enum dma_data_direction dir)
 {
         struct scatterlist *sg;
         int i;
 
-        for_each_sg(sgl, sg, nelems, i)
-                xen_swiotlb_sync_single(hwdev, sg->dma_address,
-                                        sg_dma_len(sg), dir, target);
-}
-
-static void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-                            int nelems, enum dma_data_direction dir)
-{
-        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+        for_each_sg(sgl, sg, nelems, i) {
+                xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
+                                sg->length, dir);
+        }
 }
 
 static void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir)
 {
-        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+        struct scatterlist *sg;
+        int i;
+
+        for_each_sg(sgl, sg, nelems, i) {
+                xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
+                                sg->length, dir);
+        }
 }
 
 /*
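After this hunk, xen_swiotlb_map_sg() is simply a loop over xen_swiotlb_map_page() with an out_unmap unwind path, and the sync_sg helpers loop over the single-entry sync routines. The caller-visible contract from the removed comment (addresses and lengths retrieved via sg_dma_{address,length}(), 0 returned on failure) is unchanged; a hedged sketch of a typical caller, where sgt and program_hw_descriptor() are hypothetical:

        /* Illustrative sketch, not part of the patch. */
        struct scatterlist *sg;
        int i, n;

        n = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        if (n == 0)
                return -EIO;            /* map_sg signals failure by returning 0 */

        for_each_sg(sgt->sgl, sg, n, i)
                program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));

        dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);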
@@ -690,8 +612,8 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
         .sync_single_for_device = xen_swiotlb_sync_single_for_device,
         .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
         .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
-        .map_sg = xen_swiotlb_map_sg_attrs,
-        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+        .map_sg = xen_swiotlb_map_sg,
+        .unmap_sg = xen_swiotlb_unmap_sg,
         .map_page = xen_swiotlb_map_page,
         .unmap_page = xen_swiotlb_unmap_page,
         .dma_supported = xen_swiotlb_dma_supported,
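The renamed helpers stay private to this file; drivers only ever reach them through the dma_map_ops table above via the generic DMA API. A simplified sketch of that dispatch, not the actual kernel implementation (which also handles direct mapping and attribute checks):

        /* Illustrative sketch only. */
        static dma_addr_t example_dma_map_page(struct device *dev, struct page *page,
                        size_t offset, size_t size, enum dma_data_direction dir,
                        unsigned long attrs)
        {
                const struct dma_map_ops *ops = get_dma_ops(dev);

                return ops->map_page(dev, page, offset, size, dir, attrs);
        }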