author     Ralph Campbell <ralph.campbell@qlogic.com>   2006-12-12 17:27:41 -0500
committer  Roland Dreier <rolandd@cisco.com>            2006-12-12 17:27:41 -0500
commit     9b513090a3c5e4964f9ac09016c1586988abb3d5 (patch)
tree       8b71e45be3fae1ef83a7a2808141fb02e7160fae
parent     75216638572f53612304c05a374f0246fe1d16da (diff)
IB: Add DMA mapping functions to allow device drivers to interpose

The QLogic InfiniPath HCAs use programmed I/O instead of HW DMA.
This patch allows a verbs device driver to interpose on DMA mapping
function calls in order to avoid relying on bus_to_virt() and
phys_to_virt() to undo the mappings created by dma_map_single(),
dma_map_sg(), etc.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--   include/rdma/ib_verbs.h | 253
1 file changed, 253 insertions(+), 0 deletions(-)
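As a rough illustration of the intent (not part of the patch), a kernel ULP that used to call dma_map_single() directly would now go through the wrappers added below; buf, len, sg, nents, and setup_sge() here are hypothetical placeholders. Each ib_dma_*() call dispatches to the device's dma_ops table when the driver interposes, and otherwise falls through to the standard dma_*() API on dev->dma_device:

        u64 dma_addr;
        int i, n;

        /* Map one kernel buffer for a send and check for mapping failure. */
        dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        /* ... post a send work request that references dma_addr ... */

        ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);

        /* Scatter/gather mappings, including the per-entry accessors,
         * must likewise go through the wrappers. */
        n = ib_dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
        for (i = 0; i < n; i++)
                setup_sge(i, ib_sg_dma_address(dev, &sg[i]),
                          ib_sg_dma_len(dev, &sg[i]));
        ib_dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);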
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..fd2353fa7e12 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
 	u8                     *lmc_cache;
 };
 
+struct ib_dma_mapping_ops {
+	int             (*mapping_error)(struct ib_device *dev,
+	                                 u64 dma_addr);
+	u64             (*map_single)(struct ib_device *dev,
+	                              void *ptr, size_t size,
+	                              enum dma_data_direction direction);
+	void            (*unmap_single)(struct ib_device *dev,
+	                                u64 addr, size_t size,
+	                                enum dma_data_direction direction);
+	u64             (*map_page)(struct ib_device *dev,
+	                            struct page *page, unsigned long offset,
+	                            size_t size,
+	                            enum dma_data_direction direction);
+	void            (*unmap_page)(struct ib_device *dev,
+	                              u64 addr, size_t size,
+	                              enum dma_data_direction direction);
+	int             (*map_sg)(struct ib_device *dev,
+	                          struct scatterlist *sg, int nents,
+	                          enum dma_data_direction direction);
+	void            (*unmap_sg)(struct ib_device *dev,
+	                            struct scatterlist *sg, int nents,
+	                            enum dma_data_direction direction);
+	u64             (*dma_address)(struct ib_device *dev,
+	                               struct scatterlist *sg);
+	unsigned int    (*dma_len)(struct ib_device *dev,
+	                           struct scatterlist *sg);
+	void            (*sync_single_for_cpu)(struct ib_device *dev,
+	                                       u64 dma_handle,
+	                                       size_t size,
+	                                       enum dma_data_direction dir);
+	void            (*sync_single_for_device)(struct ib_device *dev,
+	                                          u64 dma_handle,
+	                                          size_t size,
+	                                          enum dma_data_direction dir);
+	void            *(*alloc_coherent)(struct ib_device *dev,
+	                                   size_t size,
+	                                   u64 *dma_handle,
+	                                   gfp_t flag);
+	void            (*free_coherent)(struct ib_device *dev,
+	                                 size_t size, void *cpu_addr,
+	                                 u64 dma_handle);
+};
+
 struct iw_cm_verbs;
 
 struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
 	                                       struct ib_mad *in_mad,
 	                                       struct ib_mad *out_mad);
 
+	struct ib_dma_mapping_ops   *dma_ops;
+
 	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  * usable for DMA.
  * @pd: The protection domain associated with the memory region.
  * @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
  */
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
 
 /**
+ * ib_dma_mapping_error - check a DMA addr for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->mapping_error(dev, dma_addr) :
+		dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_single(struct ib_device *dev,
+                                    void *cpu_addr, size_t size,
+                                    enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
+		dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+                                       u64 addr, size_t size,
+                                       enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_single(dev, addr, size, direction) :
+		dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_page(struct ib_device *dev,
+                                  struct page *page,
+                                  unsigned long offset,
+                                  size_t size,
+                                  enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_page(dev, page, offset, size, direction) :
+		dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+                                     u64 addr, size_t size,
+                                     enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_page(dev, addr, size, direction) :
+		dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+                                struct scatterlist *sg, int nents,
+                                enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_sg(dev, sg, nents, direction) :
+		dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+                                   struct scatterlist *sg, int nents,
+                                   enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline u64 ib_sg_dma_address(struct ib_device *dev,
+                                    struct scatterlist *sg)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+                                         struct scatterlist *sg)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+                                              u64 addr,
+                                              size_t size,
+                                              enum dma_data_direction dir)
+{
+	dev->dma_ops ?
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+                                                 u64 addr,
+                                                 size_t size,
+                                                 enum dma_data_direction dir)
+{
+	dev->dma_ops ?
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA
+ * @dev: The device for which the DMA address is requested
+ * @size: The size of the region to allocate in bytes
+ * @dma_handle: A pointer for returning the DMA address of the region
+ * @flag: memory allocator flags
+ */
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+                                          size_t size,
+                                          u64 *dma_handle,
+                                          gfp_t flag)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
+		dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+}
+
+/**
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
+ * @dev: The device for which the DMA addresses were allocated
+ * @size: The size of the region
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
+ */
+static inline void ib_dma_free_coherent(struct ib_device *dev,
+                                        size_t size, void *cpu_addr,
+                                        u64 dma_handle)
+{
+	dev->dma_ops ?
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
+		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+}
+
+/**
  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
  *   by an HCA.
  * @pd: The protection domain associated assigned to the registered region.
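A device driver that needs to interpose (as the QLogic InfiniPath driver does) would install its own table; the sketch below is hypothetical, not the actual ipath implementation. Note that a driver which sets dma_ops must supply every callback, since the ib_dma_*() wrappers above invoke the dma_ops members unconditionally whenever dma_ops is non-NULL:

        static int my_mapping_error(struct ib_device *dev, u64 dma_addr)
        {
                return !dma_addr;
        }

        static u64 my_map_single(struct ib_device *dev, void *ptr, size_t size,
                                 enum dma_data_direction direction)
        {
                /* PIO hardware: hand back the kernel virtual address itself,
                 * so no bus_to_virt()/phys_to_virt() is needed to undo it. */
                return (u64) (unsigned long) ptr;
        }

        static void my_unmap_single(struct ib_device *dev, u64 addr,
                                    size_t size,
                                    enum dma_data_direction direction)
        {
                /* Nothing to undo: no IOMMU or bus mapping was created. */
        }

        static struct ib_dma_mapping_ops my_dma_ops = {
                .mapping_error  = my_mapping_error,
                .map_single     = my_map_single,
                .unmap_single   = my_unmap_single,
                /* ... all remaining callbacks must be filled in as well ... */
        };

        /* In the driver's device setup path, before ib_register_device(): */
        device->dma_ops = &my_dma_ops;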