author     Rob Herring <rob.herring@calxeda.com>    2012-08-21 06:23:23 -0400
committer  Marek Szyprowski <m.szyprowski@samsung.com>    2012-10-02 02:58:06 -0400
commit     0fa478df444f5837336d7e2fd0b41643c8d704c2
tree       4ce38a02ae1a1e5c4deba81711f06d6f650a80b1
parent     dd37e9405a8e85be49a60b2530efeb5f06bcb753
ARM: add coherent iommu dma ops

Remove arch_is_coherent() from iommu dma ops and implement separate
coherent ops functions.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
-rw-r--r--    arch/arm/mm/dma-mapping.c | 183
1 file changed, 143 insertions(+), 40 deletions(-)
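The split introduced here: iommu_ops keeps doing explicit cache maintenance
around transfers for non-coherent masters, while the new iommu_coherent_ops
table skips that maintenance entirely. Note that this patch only adds the
coherent table; nothing in it selects the table automatically. A platform
hook might choose between the two roughly as in the sketch below (the
my_setup_iommu_dma_ops() name and the source of the coherent flag are
assumptions for illustration, not part of this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Both tables live in arch/arm/mm/dma-mapping.c; no header declares the
 * coherent one in this patch, so declare them locally. */
extern struct dma_map_ops iommu_ops;
extern struct dma_map_ops iommu_coherent_ops;

/* Hypothetical selector: a coherent master snoops the CPU caches and can
 * skip cache maintenance; everything else gets the regular table. */
static void my_setup_iommu_dma_ops(struct device *dev, bool coherent)
{
	set_dma_ops(dev, coherent ? &iommu_coherent_ops : &iommu_ops);
}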
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7d772c0a93f2..5d5a4c4a5db6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1350,7 +1350,8 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, struct dma_attrs *attrs)
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova, iova_base;
@@ -1369,8 +1370,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!arch_is_coherent() &&
+		if (!is_coherent &&
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1388,20 +1389,9 @@ fail:
 	return ret;
 }
 
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs,
+		bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0;
@@ -1417,7 +1407,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs) < 0)
+			    dir, attrs, is_coherent) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1430,7 +1420,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+		is_coherent) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1445,17 +1436,44 @@ bad_mapping:
 }
 
 /**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
  *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
  */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+		bool is_coherent)
 {
 	struct scatterlist *s;
 	int i;
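The kernel-doc above promises that scatterlist entries may be merged and that
the results come back through sg_dma_{address,length}. A hedged driver-side
sketch of that contract (my_dev, my_pages and my_map_pages() are illustrative
names; error handling trimmed):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_map_pages(struct device *my_dev, struct page **my_pages,
			int n, struct scatterlist *sgl)
{
	struct scatterlist *s;
	int i, mapped;

	sg_init_table(sgl, n);
	for (i = 0; i < n; i++)
		sg_set_page(&sgl[i], my_pages[i], PAGE_SIZE, 0);

	/* Dispatches to arm_iommu_map_sg() or arm_coherent_iommu_map_sg(),
	 * whichever ops table is installed for my_dev. */
	mapped = dma_map_sg(my_dev, sgl, n, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Adjacent chunks may have been merged: 'mapped' can be < n. */
	for_each_sg(sgl, s, mapped, i)
		dev_dbg(my_dev, "seg %d: addr %#llx len %u\n", i,
			(unsigned long long)sg_dma_address(s),
			sg_dma_len(s));

	return mapped;
}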
@@ -1464,7 +1482,7 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!arch_is_coherent() &&
+		if (!is_coherent &&
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
@@ -1472,6 +1490,38 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 }
 
 /**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+}
+
+/**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
  * @sg: list of buffers
@@ -1485,8 +1535,7 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
 }
 
@@ -1504,22 +1553,21 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
 
 /**
- * arm_iommu_map_page
+ * arm_coherent_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
  * @size: size of buffer to map
  * @dir: DMA transfer direction
  *
- * IOMMU aware version of arm_dma_map_page()
+ * Coherent IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
@@ -1527,9 +1575,6 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t dma_addr;
 	int ret, len = PAGE_ALIGN(size + offset);
 
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
@@ -1545,6 +1590,52 @@ fail:
 }
 
 /**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+/**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
  * @handle: DMA address of buffer
@@ -1566,7 +1657,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
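Taken together, the map_page/unmap_page hunks reduce the non-coherent map path
to "cache maintenance, then the coherent path". The resulting call flow,
assuming DMA_ATTR_SKIP_CPU_SYNC is not set:

/*
 * dma_map_page(dev, page, offset, size, dir)
 *   -> arm_iommu_map_page()                 (iommu_ops, non-coherent)
 *        __dma_page_cpu_to_dev()            -- CPU cache clean/invalidate
 *        -> arm_coherent_iommu_map_page()   -- __alloc_iova() + iommu_map()
 *
 * A device wired to iommu_coherent_ops enters arm_coherent_iommu_map_page()
 * directly and does no cache maintenance. Unmap keeps two separate
 * implementations that differ only in the __dma_page_dev_to_cpu() call.
 */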
@@ -1584,8 +1675,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1619,6 +1709,19 @@ struct dma_map_ops iommu_ops = {
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 };
 
+struct dma_map_ops iommu_coherent_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+	.get_sgtable	= arm_iommu_get_sgtable,
+
+	.map_page	= arm_coherent_iommu_map_page,
+	.unmap_page	= arm_coherent_iommu_unmap_page,
+
+	.map_sg		= arm_coherent_iommu_map_sg,
+	.unmap_sg	= arm_coherent_iommu_unmap_sg,
+};
+
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
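Finally, a hedged end-to-end sketch of putting the new table into service. In
this series arm_iommu_attach_device() installs &iommu_ops, so a caller wanting
coherent behaviour would override the ops after attaching; the
my_attach_coherent() name and the IOVA window values are invented for
illustration, and the arm_iommu_* signatures are as they stand at this point
in the tree:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

extern struct dma_map_ops iommu_coherent_ops;	/* no header declares it yet */

static int my_attach_coherent(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 256 MiB of IOVA space at 0x80000000; values are arbitrary. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, 0x10000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* arm_iommu_attach_device() installed &iommu_ops; swap in the
	 * coherent table for a cache-snooping master. */
	set_dma_ops(dev, &iommu_coherent_ops);
	return 0;
}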