diff options
| author | Joerg Roedel <joerg.roedel@amd.com> | 2010-01-08 07:35:09 -0500 |
|---|---|---|
| committer | Joerg Roedel <joerg.roedel@amd.com> | 2010-03-07 12:01:11 -0500 |
| commit | cefc53c7f494240d4813c80154c7617452d1904d (patch) | |
| tree | 675370ec20df0841e404ed7b191d2d41f30f7e52 /include/linux | |
| parent | 4abc14a733f9002c05623db755aaafdd27fa7a91 (diff) | |
iommu-api: Add iommu_map and iommu_unmap functions
These two functions provide support for mapping and
unmapping physical addresses to I/O virtual addresses. The
difference from the iommu_(un)map_range() functions is that
the new functions take a gfp_order parameter instead of a
size. This allows the IOMMU backend implementations to
detect more easily whether a given range can be mapped by
larger page sizes.
These new functions should replace the old ones in the long
term.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/iommu.h | 16 |
1 files changed, 16 insertions, 0 deletions
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0f18f37a6503..6d0035bb1a0c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -60,6 +60,10 @@ extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, | |||
| 60 | phys_addr_t paddr, size_t size, int prot); | 60 | phys_addr_t paddr, size_t size, int prot); |
| 61 | extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | 61 | extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, |
| 62 | size_t size); | 62 | size_t size); |
| 63 | extern int iommu_map(struct iommu_domain *domain, unsigned long iova, | ||
| 64 | phys_addr_t paddr, int gfp_order, int prot); | ||
| 65 | extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, | ||
| 66 | int gfp_order); | ||
| 63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 67 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
| 64 | unsigned long iova); | 68 | unsigned long iova); |
| 65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | 69 | extern int iommu_domain_has_cap(struct iommu_domain *domain, |
| @@ -108,6 +112,18 @@ static inline void iommu_unmap_range(struct iommu_domain *domain, | |||
| 108 | { | 112 | { |
| 109 | } | 113 | } |
| 110 | 114 | ||
| 115 | static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, | ||
| 116 | phys_addr_t paddr, int gfp_order, int prot) | ||
| 117 | { | ||
| 118 | return -ENODEV; | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, | ||
| 122 | int gfp_order) | ||
| 123 | { | ||
| 124 | return -ENODEV; | ||
| 125 | } | ||
| 126 | |||
| 111 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 127 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
| 112 | unsigned long iova) | 128 | unsigned long iova) |
| 113 | { | 129 | { |
