 include/rdma/ib_verbs.h | 70 +++++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd2353fa7e12..3c2e10574b23 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1456,9 +1456,9 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->mapping_error(dev, dma_addr) :
-		dma_mapping_error(dma_addr);
+	if (dev->dma_ops)
+		return dev->dma_ops->mapping_error(dev, dma_addr);
+	return dma_mapping_error(dma_addr);
 }
 
 /**
@@ -1472,9 +1472,9 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 				    void *cpu_addr, size_t size,
 				    enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
-		dma_map_single(dev->dma_device, cpu_addr, size, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
 /**
@@ -1488,8 +1488,9 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 				  u64 addr, size_t size,
 				  enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_single(dev, addr, size, direction) :
-		dma_unmap_single(dev->dma_device, addr, size, direction);
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_single(dev, addr, size, direction);
+	else
+		dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
@@ -1507,9 +1508,9 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_page(dev, page, offset, size, direction) :
-		dma_map_page(dev->dma_device, page, offset, size, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_page(dev, page, offset, size, direction);
+	return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
 /**
@@ -1523,8 +1524,9 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 				u64 addr, size_t size,
 				enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_page(dev, addr, size, direction) :
-		dma_unmap_page(dev->dma_device, addr, size, direction);
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_page(dev, addr, size, direction);
+	else
+		dma_unmap_page(dev->dma_device, addr, size, direction);
 }
 
@@ -1539,9 +1541,9 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_sg(dev, sg, nents, direction) :
-		dma_map_sg(dev->dma_device, sg, nents, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg(dev, sg, nents, direction);
+	return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
 
 /**
@@ -1555,8 +1557,9 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
-		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
+	else
+		dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
@@ -1568,8 +1571,9 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 static inline u64 ib_sg_dma_address(struct ib_device *dev,
 				    struct scatterlist *sg)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_address(dev, sg);
+	return sg_dma_address(sg);
 }
 
 /**
@@ -1580,8 +1584,9 @@ static inline u64 ib_sg_dma_address(struct ib_device *dev,
 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
 					 struct scatterlist *sg)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_len(dev, sg);
+	return sg_dma_len(sg);
 }
 
 /**
@@ -1596,8 +1601,9 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	dev->dma_ops ?
-		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
-		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
+	else
+		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
@@ -1613,8 +1619,9 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	dev->dma_ops ?
-		dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
-		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
+	else
+		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
@@ -1630,9 +1637,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 					   u64 *dma_handle,
 					   gfp_t flag)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
-		dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	if (dev->dma_ops)
+		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }
 
 /**
@@ -1646,8 +1653,9 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 					size_t size, void *cpu_addr,
 					u64 dma_handle)
 {
-	dev->dma_ops ?
-		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
-		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+	if (dev->dma_ops)
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	else
+		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }
 
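Usage note (illustrative sketch, not part of the patch): the converted wrappers keep their existing signatures, so callers are unchanged. A minimal example, assuming an already-initialized struct ib_device *dev and a kernel buffer buf of len bytes (both hypothetical names):

	u64 dma_addr;

	/* Map the buffer for DMA to the device; dispatches to
	 * dev->dma_ops->map_single() if present, else dma_map_single(). */
	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post work requests that reference dma_addr ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);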