diff options
Diffstat (limited to 'include/rdma/ib_verbs.h')
-rw-r--r-- | include/rdma/ib_verbs.h | 271 |
1 files changed, 270 insertions, 1 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..765589f4d166 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,9 @@ | |||
43 | 43 | ||
44 | #include <linux/types.h> | 44 | #include <linux/types.h> |
45 | #include <linux/device.h> | 45 | #include <linux/device.h> |
46 | #include <linux/mm.h> | ||
47 | #include <linux/dma-mapping.h> | ||
48 | #include <linux/kref.h> | ||
46 | 49 | ||
47 | #include <asm/atomic.h> | 50 | #include <asm/atomic.h> |
48 | #include <asm/scatterlist.h> | 51 | #include <asm/scatterlist.h> |
@@ -417,8 +420,8 @@ struct ib_wc { | |||
417 | enum ib_wc_opcode opcode; | 420 | enum ib_wc_opcode opcode; |
418 | u32 vendor_err; | 421 | u32 vendor_err; |
419 | u32 byte_len; | 422 | u32 byte_len; |
423 | struct ib_qp *qp; | ||
420 | __be32 imm_data; | 424 | __be32 imm_data; |
421 | u32 qp_num; | ||
422 | u32 src_qp; | 425 | u32 src_qp; |
423 | int wc_flags; | 426 | int wc_flags; |
424 | u16 pkey_index; | 427 | u16 pkey_index; |
@@ -848,6 +851,49 @@ struct ib_cache { | |||
848 | u8 *lmc_cache; | 851 | u8 *lmc_cache; |
849 | }; | 852 | }; |
850 | 853 | ||
854 | struct ib_dma_mapping_ops { | ||
855 | int (*mapping_error)(struct ib_device *dev, | ||
856 | u64 dma_addr); | ||
857 | u64 (*map_single)(struct ib_device *dev, | ||
858 | void *ptr, size_t size, | ||
859 | enum dma_data_direction direction); | ||
860 | void (*unmap_single)(struct ib_device *dev, | ||
861 | u64 addr, size_t size, | ||
862 | enum dma_data_direction direction); | ||
863 | u64 (*map_page)(struct ib_device *dev, | ||
864 | struct page *page, unsigned long offset, | ||
865 | size_t size, | ||
866 | enum dma_data_direction direction); | ||
867 | void (*unmap_page)(struct ib_device *dev, | ||
868 | u64 addr, size_t size, | ||
869 | enum dma_data_direction direction); | ||
870 | int (*map_sg)(struct ib_device *dev, | ||
871 | struct scatterlist *sg, int nents, | ||
872 | enum dma_data_direction direction); | ||
873 | void (*unmap_sg)(struct ib_device *dev, | ||
874 | struct scatterlist *sg, int nents, | ||
875 | enum dma_data_direction direction); | ||
876 | u64 (*dma_address)(struct ib_device *dev, | ||
877 | struct scatterlist *sg); | ||
878 | unsigned int (*dma_len)(struct ib_device *dev, | ||
879 | struct scatterlist *sg); | ||
880 | void (*sync_single_for_cpu)(struct ib_device *dev, | ||
881 | u64 dma_handle, | ||
882 | size_t size, | ||
883 | enum dma_data_direction dir); | ||
884 | void (*sync_single_for_device)(struct ib_device *dev, | ||
885 | u64 dma_handle, | ||
886 | size_t size, | ||
887 | enum dma_data_direction dir); | ||
888 | void *(*alloc_coherent)(struct ib_device *dev, | ||
889 | size_t size, | ||
890 | u64 *dma_handle, | ||
891 | gfp_t flag); | ||
892 | void (*free_coherent)(struct ib_device *dev, | ||
893 | size_t size, void *cpu_addr, | ||
894 | u64 dma_handle); | ||
895 | }; | ||
896 | |||
851 | struct iw_cm_verbs; | 897 | struct iw_cm_verbs; |
852 | 898 | ||
853 | struct ib_device { | 899 | struct ib_device { |
@@ -992,6 +1038,8 @@ struct ib_device { | |||
992 | struct ib_mad *in_mad, | 1038 | struct ib_mad *in_mad, |
993 | struct ib_mad *out_mad); | 1039 | struct ib_mad *out_mad); |
994 | 1040 | ||
1041 | struct ib_dma_mapping_ops *dma_ops; | ||
1042 | |||
995 | struct module *owner; | 1043 | struct module *owner; |
996 | struct class_device class_dev; | 1044 | struct class_device class_dev; |
997 | struct kobject ports_parent; | 1045 | struct kobject ports_parent; |
@@ -1395,10 +1443,231 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) | |||
1395 | * usable for DMA. | 1443 | * usable for DMA. |
1396 | * @pd: The protection domain associated with the memory region. | 1444 | * @pd: The protection domain associated with the memory region. |
1397 | * @mr_access_flags: Specifies the memory access rights. | 1445 | * @mr_access_flags: Specifies the memory access rights. |
1446 | * | ||
1447 | * Note that the ib_dma_*() functions defined below must be used | ||
1448 | * to create/destroy addresses used with the Lkey or Rkey returned | ||
1449 | * by ib_get_dma_mr(). | ||
1398 | */ | 1450 | */ |
1399 | struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); | 1451 | struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); |
1400 | 1452 | ||
1401 | /** | 1453 | /** |
1454 | * ib_dma_mapping_error - check a DMA addr for error | ||
1455 | * @dev: The device for which the dma_addr was created | ||
1456 | * @dma_addr: The DMA address to check | ||
1457 | */ | ||
1458 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) | ||
1459 | { | ||
1460 | if (dev->dma_ops) | ||
1461 | return dev->dma_ops->mapping_error(dev, dma_addr); | ||
1462 | return dma_mapping_error(dma_addr); | ||
1463 | } | ||
1464 | |||
1465 | /** | ||
1466 | * ib_dma_map_single - Map a kernel virtual address to DMA address | ||
1467 | * @dev: The device for which the dma_addr is to be created | ||
1468 | * @cpu_addr: The kernel virtual address | ||
1469 | * @size: The size of the region in bytes | ||
1470 | * @direction: The direction of the DMA | ||
1471 | */ | ||
1472 | static inline u64 ib_dma_map_single(struct ib_device *dev, | ||
1473 | void *cpu_addr, size_t size, | ||
1474 | enum dma_data_direction direction) | ||
1475 | { | ||
1476 | if (dev->dma_ops) | ||
1477 | return dev->dma_ops->map_single(dev, cpu_addr, size, direction); | ||
1478 | return dma_map_single(dev->dma_device, cpu_addr, size, direction); | ||
1479 | } | ||
1480 | |||
1481 | /** | ||
1482 | * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() | ||
1483 | * @dev: The device for which the DMA address was created | ||
1484 | * @addr: The DMA address | ||
1485 | * @size: The size of the region in bytes | ||
1486 | * @direction: The direction of the DMA | ||
1487 | */ | ||
1488 | static inline void ib_dma_unmap_single(struct ib_device *dev, | ||
1489 | u64 addr, size_t size, | ||
1490 | enum dma_data_direction direction) | ||
1491 | { | ||
1492 | if (dev->dma_ops) | ||
1493 | dev->dma_ops->unmap_single(dev, addr, size, direction); | ||
1494 | else | ||
1495 | dma_unmap_single(dev->dma_device, addr, size, direction); | ||
1496 | } | ||
1497 | |||
1498 | /** | ||
1499 | * ib_dma_map_page - Map a physical page to DMA address | ||
1500 | * @dev: The device for which the dma_addr is to be created | ||
1501 | * @page: The page to be mapped | ||
1502 | * @offset: The offset within the page | ||
1503 | * @size: The size of the region in bytes | ||
1504 | * @direction: The direction of the DMA | ||
1505 | */ | ||
1506 | static inline u64 ib_dma_map_page(struct ib_device *dev, | ||
1507 | struct page *page, | ||
1508 | unsigned long offset, | ||
1509 | size_t size, | ||
1510 | enum dma_data_direction direction) | ||
1511 | { | ||
1512 | if (dev->dma_ops) | ||
1513 | return dev->dma_ops->map_page(dev, page, offset, size, direction); | ||
1514 | return dma_map_page(dev->dma_device, page, offset, size, direction); | ||
1515 | } | ||
1516 | |||
1517 | /** | ||
1518 | * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() | ||
1519 | * @dev: The device for which the DMA address was created | ||
1520 | * @addr: The DMA address | ||
1521 | * @size: The size of the region in bytes | ||
1522 | * @direction: The direction of the DMA | ||
1523 | */ | ||
1524 | static inline void ib_dma_unmap_page(struct ib_device *dev, | ||
1525 | u64 addr, size_t size, | ||
1526 | enum dma_data_direction direction) | ||
1527 | { | ||
1528 | if (dev->dma_ops) | ||
1529 | dev->dma_ops->unmap_page(dev, addr, size, direction); | ||
1530 | else | ||
1531 | dma_unmap_page(dev->dma_device, addr, size, direction); | ||
1532 | } | ||
1533 | |||
1534 | /** | ||
1535 | * ib_dma_map_sg - Map a scatter/gather list to DMA addresses | ||
1536 | * @dev: The device for which the DMA addresses are to be created | ||
1537 | * @sg: The array of scatter/gather entries | ||
1538 | * @nents: The number of scatter/gather entries | ||
1539 | * @direction: The direction of the DMA | ||
1540 | */ | ||
1541 | static inline int ib_dma_map_sg(struct ib_device *dev, | ||
1542 | struct scatterlist *sg, int nents, | ||
1543 | enum dma_data_direction direction) | ||
1544 | { | ||
1545 | if (dev->dma_ops) | ||
1546 | return dev->dma_ops->map_sg(dev, sg, nents, direction); | ||
1547 | return dma_map_sg(dev->dma_device, sg, nents, direction); | ||
1548 | } | ||
1549 | |||
1550 | /** | ||
1551 | * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses | ||
1552 | * @dev: The device for which the DMA addresses were created | ||
1553 | * @sg: The array of scatter/gather entries | ||
1554 | * @nents: The number of scatter/gather entries | ||
1555 | * @direction: The direction of the DMA | ||
1556 | */ | ||
1557 | static inline void ib_dma_unmap_sg(struct ib_device *dev, | ||
1558 | struct scatterlist *sg, int nents, | ||
1559 | enum dma_data_direction direction) | ||
1560 | { | ||
1561 | if (dev->dma_ops) | ||
1562 | dev->dma_ops->unmap_sg(dev, sg, nents, direction); | ||
1563 | else | ||
1564 | dma_unmap_sg(dev->dma_device, sg, nents, direction); | ||
1565 | } | ||
1566 | |||
1567 | /** | ||
1568 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry | ||
1569 | * @dev: The device for which the DMA addresses were created | ||
1570 | * @sg: The scatter/gather entry | ||
1571 | */ | ||
1572 | static inline u64 ib_sg_dma_address(struct ib_device *dev, | ||
1573 | struct scatterlist *sg) | ||
1574 | { | ||
1575 | if (dev->dma_ops) | ||
1576 | return dev->dma_ops->dma_address(dev, sg); | ||
1577 | return sg_dma_address(sg); | ||
1578 | } | ||
1579 | |||
1580 | /** | ||
1581 | * ib_sg_dma_len - Return the DMA length from a scatter/gather entry | ||
1582 | * @dev: The device for which the DMA addresses were created | ||
1583 | * @sg: The scatter/gather entry | ||
1584 | */ | ||
1585 | static inline unsigned int ib_sg_dma_len(struct ib_device *dev, | ||
1586 | struct scatterlist *sg) | ||
1587 | { | ||
1588 | if (dev->dma_ops) | ||
1589 | return dev->dma_ops->dma_len(dev, sg); | ||
1590 | return sg_dma_len(sg); | ||
1591 | } | ||
1592 | |||
1593 | /** | ||
1594 | * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU | ||
1595 | * @dev: The device for which the DMA address was created | ||
1596 | * @addr: The DMA address | ||
1597 | * @size: The size of the region in bytes | ||
1598 | * @dir: The direction of the DMA | ||
1599 | */ | ||
1600 | static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, | ||
1601 | u64 addr, | ||
1602 | size_t size, | ||
1603 | enum dma_data_direction dir) | ||
1604 | { | ||
1605 | if (dev->dma_ops) | ||
1606 | dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); | ||
1607 | else | ||
1608 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); | ||
1609 | } | ||
1610 | |||
1611 | /** | ||
1612 | * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device | ||
1613 | * @dev: The device for which the DMA address was created | ||
1614 | * @addr: The DMA address | ||
1615 | * @size: The size of the region in bytes | ||
1616 | * @dir: The direction of the DMA | ||
1617 | */ | ||
1618 | static inline void ib_dma_sync_single_for_device(struct ib_device *dev, | ||
1619 | u64 addr, | ||
1620 | size_t size, | ||
1621 | enum dma_data_direction dir) | ||
1622 | { | ||
1623 | if (dev->dma_ops) | ||
1624 | dev->dma_ops->sync_single_for_device(dev, addr, size, dir); | ||
1625 | else | ||
1626 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); | ||
1627 | } | ||
1628 | |||
1629 | /** | ||
1630 | * ib_dma_alloc_coherent - Allocate memory and map it for DMA | ||
1631 | * @dev: The device for which the DMA address is requested | ||
1632 | * @size: The size of the region to allocate in bytes | ||
1633 | * @dma_handle: A pointer for returning the DMA address of the region | ||
1634 | * @flag: memory allocator flags | ||
1635 | */ | ||
1636 | static inline void *ib_dma_alloc_coherent(struct ib_device *dev, | ||
1637 | size_t size, | ||
1638 | u64 *dma_handle, | ||
1639 | gfp_t flag) | ||
1640 | { | ||
1641 | if (dev->dma_ops) | ||
1642 | return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
1643 | else { | ||
1644 | dma_addr_t handle; | ||
1645 | void *ret; | ||
1646 | |||
1647 | ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); | ||
1648 | *dma_handle = handle; | ||
1649 | return ret; | ||
1650 | } | ||
1651 | } | ||
1652 | |||
1653 | /** | ||
1654 | * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() | ||
1655 | * @dev: The device for which the DMA addresses were allocated | ||
1656 | * @size: The size of the region | ||
1657 | * @cpu_addr: the address returned by ib_dma_alloc_coherent() | ||
1658 | * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() | ||
1659 | */ | ||
1660 | static inline void ib_dma_free_coherent(struct ib_device *dev, | ||
1661 | size_t size, void *cpu_addr, | ||
1662 | u64 dma_handle) | ||
1663 | { | ||
1664 | if (dev->dma_ops) | ||
1665 | dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
1666 | else | ||
1667 | dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); | ||
1668 | } | ||
1669 | |||
1670 | /** | ||
1402 | * ib_reg_phys_mr - Prepares a virtually addressed memory region for use | 1671 | * ib_reg_phys_mr - Prepares a virtually addressed memory region for use |
1403 | * by an HCA. | 1672 | * by an HCA. |
1404 | * @pd: The protection domain associated assigned to the registered region. | 1673 | * @pd: The protection domain associated assigned to the registered region. |