-rw-r--r--	drivers/xen/swiotlb-xen.c	22
-rw-r--r--	include/linux/swiotlb.h	11
-rw-r--r--	lib/swiotlb.c	78
3 files changed, 59 insertions, 52 deletions
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58db6df866ef..8a6035aa69c9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -338,9 +338,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				enum dma_data_direction dir,
 				struct dma_attrs *attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
+	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = xen_phys_to_bus(phys);
-	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -356,16 +355,16 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
-	if (!map)
+	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
-	dev_addr = xen_virt_to_bus(map);
+	dev_addr = xen_phys_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
 		dev_addr = 0;
 	}
 	return dev_addr;
@@ -494,11 +493,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
-			void *map = swiotlb_tbl_map_single(hwdev,
-							   start_dma_addr,
-							   sg_phys(sg),
-							   sg->length, dir);
-			if (!map) {
+			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+								 start_dma_addr,
+								 sg_phys(sg),
+								 sg->length,
+								 dir);
+			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +506,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				sgl[0].dma_length = 0;
 				return DMA_ERROR_CODE;
 			}
-			sg->dma_address = xen_virt_to_bus(map);
+			sg->dma_address = xen_phys_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
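For readers following the API change, the bounce-buffer path in xen_swiotlb_map_page() after this patch can be condensed as below. This is an illustrative sketch assembled from the hunks above, not additional code from the patch:

	phys_addr_t map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);

	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	/* the slot is now returned as a physical address ... */
	dev_addr = xen_phys_to_bus(map);	/* ... converted for the device */

	if (!dma_capable(dev, dev_addr, size)) {
		/* the unmap helper still takes a virtual address, hence phys_to_virt() */
		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
		dev_addr = 0;
	}
	return dev_addr;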
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8d08b3ed406d..1995f3e04fed 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,9 +34,14 @@ enum dma_sync_target {
 	SYNC_FOR_CPU = 0,
 	SYNC_FOR_DEVICE = 1,
 };
-extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-				    phys_addr_t phys, size_t size,
-				    enum dma_data_direction dir);
+
+/* define the last possible byte of physical address space as a mapping error */
+#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
+
+extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+					  dma_addr_t tbl_dma_addr,
+					  phys_addr_t phys, size_t size,
+					  enum dma_data_direction dir);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
 				     size_t size, enum dma_data_direction dir);
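SWIOTLB_MAP_ERROR is the all-ones physical address, i.e. the last possible byte of the physical address space, which the bounce-buffer pool will never hand out, so callers test the return value directly instead of comparing against NULL. A minimal, illustrative check against the new prototype:

	phys_addr_t tlb_addr;

	tlb_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys, size, dir);
	if (tlb_addr == SWIOTLB_MAP_ERROR) {
		/* no free slots in the io_tlb pool; propagate the error */
	}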
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f8c0d4e1d1d3..3adc148bb8d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -393,12 +393,13 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 }
 EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-			     phys_addr_t phys, size_t size,
-			     enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+				   dma_addr_t tbl_dma_addr,
+				   phys_addr_t orig_addr, size_t size,
+				   enum dma_data_direction dir)
 {
 	unsigned long flags;
-	char *dma_addr;
+	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
 	unsigned long mask;
@@ -462,7 +463,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 			io_tlb_list[i] = 0;
 		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
 			io_tlb_list[i] = ++count;
-		dma_addr = (char *)phys_to_virt(io_tlb_start) + (index << IO_TLB_SHIFT);
+		tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
 		/*
 		 * Update the indices to avoid searching in the next
@@ -480,7 +481,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
-	return NULL;
+	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
@@ -490,11 +491,12 @@ found:
 	 * needed.
 	 */
 	for (i = 0; i < nslots; i++)
-		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size,
+			       DMA_TO_DEVICE);
 
-	return dma_addr;
+	return tlb_addr;
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
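The slot search itself is untouched; only the representation of the result changes, from a kernel virtual pointer into the io_tlb area to the equivalent physical address. Roughly, with the conversions used by the callers above (an illustrative summary, not part of the patch):

	/* before: virtual address of the allocated slot */
	dma_addr = (char *)phys_to_virt(io_tlb_start) + (index << IO_TLB_SHIFT);

	/* after: the same slot as a physical address */
	tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

	/* callers derive whichever form they need */
	dev_addr = phys_to_dma(hwdev, tlb_addr);	/* device-visible bus address */
	vaddr    = phys_to_virt(tlb_addr);		/* CPU address for bouncing/unmap */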
@@ -502,9 +504,8 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+		       enum dma_data_direction dir)
 {
 	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
@@ -598,12 +599,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
-		/*
-		 * The allocated memory isn't reachable by the device.
-		 */
-		free_pages((unsigned long) ret, order);
-		ret = NULL;
+	if (ret) {
+		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		if (dev_addr + size - 1 > dma_mask) {
+			/*
+			 * The allocated memory isn't reachable by the device.
+			 */
+			free_pages((unsigned long) ret, order);
+			ret = NULL;
+		}
 	}
 	if (!ret) {
 		/*
@@ -611,13 +615,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
-		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
-		if (!ret)
+		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		if (paddr == SWIOTLB_MAP_ERROR)
 			return NULL;
-	}
 
-	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		ret = phys_to_virt(paddr);
+		dev_addr = phys_to_dma(hwdev, paddr);
+	}
 
 	/* Confirm address can be DMA'd by device */
 	if (dev_addr + size - 1 > dma_mask) {
@@ -629,7 +633,10 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
+
 	*dma_handle = dev_addr;
+	memset(ret, 0, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
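Taken together, the fallback path in swiotlb_alloc_coherent() now reads roughly as follows. This is a condensed restatement of the hunks above (error branches trimmed), showing that the bounce buffer is converted back with phys_to_virt() and cleared only after the dma_mask check has passed:

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);

		if (paddr == SWIOTLB_MAP_ERROR)
			return NULL;

		ret = phys_to_virt(paddr);
		dev_addr = phys_to_dma(hwdev, paddr);
	}

	/* ... dma_mask check as before ... */

	*dma_handle = dev_addr;
	memset(ret, 0, size);
	return ret;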
@@ -686,9 +693,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
+	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
-	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -699,22 +705,18 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
-	/*
-	 * Oh well, have to allocate and map a bounce buffer.
-	 */
+	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir);
-	if (!map) {
+	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_virt_to_bus(dev, map);
+	dev_addr = phys_to_dma(dev, map);
 
-	/*
-	 * Ensure that the address returned is DMA'ble
-	 */
+	/* Ensure that the address returned is DMA'ble */
 	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
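Unlike the Xen variant above, swiotlb_map_page() still does not return DMA_ERROR_CODE; both failure branches fall back to the overflow buffer as before, and only the error test and the address conversions change. Condensed for illustration:

	map = map_single(dev, phys, size, dir);		/* now a phys_addr_t */
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = phys_to_dma(dev, map);
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, phys_to_virt(map), size, dir);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}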
@@ -840,9 +842,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
-			void *map = map_single(hwdev, sg_phys(sg),
-					       sg->length, dir);
-			if (!map) {
+			phys_addr_t map = map_single(hwdev, sg_phys(sg),
+						     sg->length, dir);
+			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
@@ -851,7 +853,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+			sg->dma_address = phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
