aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJean-Philippe Brucker <jean-philippe.brucker@arm.com>2019-04-10 11:15:16 -0400
committerJoerg Roedel <jroedel@suse.de>2019-04-11 11:08:52 -0400
commit26b25a2b98e45aeb40eedcedc586ad5034cbd984 (patch)
tree267d8417e78f528e7a5e1b342561359737225e04
parenta3a195929d40b38833ffd0f82b2db2cc898641eb (diff)
iommu: Bind process address spaces to devices
Add bind() and unbind() operations to the IOMMU API. iommu_sva_bind_device() binds a device to an mm, and returns a handle to the bond, which is released by calling iommu_sva_unbind_device(). Each mm bound to devices gets a PASID (by convention, a 20-bit system-wide ID representing the address space), which can be retrieved with iommu_sva_get_pasid(). When programming DMA addresses, device drivers include this PASID in a device-specific manner, to let the device access the given address space. Since the process memory may be paged out, device and IOMMU must support I/O page faults (e.g. PCI PRI). Using iommu_sva_set_ops(), device drivers provide an mm_exit() callback that is called by the IOMMU driver if the process exits before the device driver called unbind(). In mm_exit(), device driver should disable DMA from the given context, so that the core IOMMU can reallocate the PASID. Whether the process exited or not, the device driver should always release the handle with unbind(). To use these functions, device driver must first enable the IOMMU_DEV_FEAT_SVA device feature with iommu_dev_enable_feature(). Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/iommu.c104
-rw-r--r--include/linux/iommu.h70
2 files changed, 174 insertions, 0 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 344e27e8f188..f8fe112e507a 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2135,3 +2135,107 @@ int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2135 return ret; 2135 return ret;
2136} 2136}
2137EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2137EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
2138
2139/**
2140 * iommu_sva_bind_device() - Bind a process address space to a device
2141 * @dev: the device
2142 * @mm: the mm to bind, caller must hold a reference to it
2143 *
2144 * Create a bond between device and address space, allowing the device to access
2145 * the mm using the returned PASID. If a bond already exists between @dev and
2146 * @mm, it is returned and an additional reference is taken. Caller must call
2147 * iommu_sva_unbind_device() to release each reference.
2148 *
2149 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2150 * initialize the required SVA features.
2151 *
2152 * On error, returns an ERR_PTR value.
2153 */
2154struct iommu_sva *
2155iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2156{
2157 struct iommu_group *group;
2158 struct iommu_sva *handle = ERR_PTR(-EINVAL);
2159 const struct iommu_ops *ops = dev->bus->iommu_ops;
2160
2161 if (!ops || !ops->sva_bind)
2162 return ERR_PTR(-ENODEV);
2163
2164 group = iommu_group_get(dev);
2165 if (!group)
2166 return ERR_PTR(-ENODEV);
2167
2168 /* Ensure device count and domain don't change while we're binding */
2169 mutex_lock(&group->mutex);
2170
2171 /*
2172 * To keep things simple, SVA currently doesn't support IOMMU groups
2173 * with more than one device. Existing SVA-capable systems are not
2174 * affected by the problems that required IOMMU groups (lack of ACS
2175 * isolation, device ID aliasing and other hardware issues).
2176 */
2177 if (iommu_group_device_count(group) != 1)
2178 goto out_unlock;
2179
2180 handle = ops->sva_bind(dev, mm, drvdata);
2181
2182out_unlock:
2183 mutex_unlock(&group->mutex);
2184 iommu_group_put(group);
2185
2186 return handle;
2187}
2188EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2189
2190/**
2191 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2192 * @handle: the handle returned by iommu_sva_bind_device()
2193 *
2194 * Put reference to a bond between device and address space. The device should
2195 * not be issuing any more transactions for this PASID. All outstanding page
2196 * requests for this PASID must have been flushed to the IOMMU.
2197 */
2200void iommu_sva_unbind_device(struct iommu_sva *handle)
2201{
2202 struct iommu_group *group;
2203 struct device *dev = handle->dev;
2204 const struct iommu_ops *ops = dev->bus->iommu_ops;
2205
2206 if (!ops || !ops->sva_unbind)
2207 return;
2208
2209 group = iommu_group_get(dev);
2210 if (!group)
2211 return;
2212
2213 mutex_lock(&group->mutex);
2214 ops->sva_unbind(handle);
2215 mutex_unlock(&group->mutex);
2216
2217 iommu_group_put(group);
2218}
2219EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2220
2221int iommu_sva_set_ops(struct iommu_sva *handle,
2222 const struct iommu_sva_ops *sva_ops)
2223{
2224 if (handle->ops && handle->ops != sva_ops)
2225 return -EEXIST;
2226
2227 handle->ops = sva_ops;
2228 return 0;
2229}
2230EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2231
2232int iommu_sva_get_pasid(struct iommu_sva *handle)
2233{
2234 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2235
2236 if (!ops || !ops->sva_get_pasid)
2237 return IOMMU_PASID_INVALID;
2238
2239 return ops->sva_get_pasid(handle);
2240}
2241EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8239ece9fdfc..480921dfbadf 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -48,6 +48,7 @@ struct bus_type;
48struct device; 48struct device;
49struct iommu_domain; 49struct iommu_domain;
50struct notifier_block; 50struct notifier_block;
51struct iommu_sva;
51 52
52/* iommu fault flags */ 53/* iommu fault flags */
53#define IOMMU_FAULT_READ 0x0 54#define IOMMU_FAULT_READ 0x0
@@ -55,6 +56,8 @@ struct notifier_block;
55 56
56typedef int (*iommu_fault_handler_t)(struct iommu_domain *, 57typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
57 struct device *, unsigned long, int, void *); 58 struct device *, unsigned long, int, void *);
59typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
60 void *);
58 61
59struct iommu_domain_geometry { 62struct iommu_domain_geometry {
60 dma_addr_t aperture_start; /* First address that can be mapped */ 63 dma_addr_t aperture_start; /* First address that can be mapped */
@@ -159,6 +162,28 @@ struct iommu_resv_region {
159/* Per device IOMMU features */ 162/* Per device IOMMU features */
160enum iommu_dev_features { 163enum iommu_dev_features {
161 IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */ 164 IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */
165 IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */
166};
167
168#define IOMMU_PASID_INVALID (-1U)
169
170/**
171 * struct iommu_sva_ops - device driver callbacks for an SVA context
172 *
173 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
174 * @mm_exit returns, the device must not issue any more transactions
175 * with the PASID given as argument.
176 *
177 * The @mm_exit handler is allowed to sleep. Be careful about the
178 * locks taken in @mm_exit, because they might lead to deadlocks if
179 * they are also held when dropping references to the mm. Consider the
180 * following call chain:
181 * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
182 * Using mmput_async() prevents this scenario.
183 *
184 */
185struct iommu_sva_ops {
186 iommu_mm_exit_handler_t mm_exit;
162}; 187};
163 188
164#ifdef CONFIG_IOMMU_API 189#ifdef CONFIG_IOMMU_API
@@ -196,6 +221,9 @@ enum iommu_dev_features {
196 * @dev_feat_enabled: check enabled feature 221 * @dev_feat_enabled: check enabled feature
197 * @aux_attach/detach_dev: aux-domain specific attach/detach entries. 222 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
198 * @aux_get_pasid: get the pasid given an aux-domain 223 * @aux_get_pasid: get the pasid given an aux-domain
224 * @sva_bind: Bind process address space to device
225 * @sva_unbind: Unbind process address space from device
226 * @sva_get_pasid: Get PASID associated to a SVA handle
199 * @pgsize_bitmap: bitmap of all possible supported page sizes 227 * @pgsize_bitmap: bitmap of all possible supported page sizes
200 */ 228 */
201struct iommu_ops { 229struct iommu_ops {
@@ -251,6 +279,11 @@ struct iommu_ops {
251 void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); 279 void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
252 int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); 280 int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
253 281
282 struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
283 void *drvdata);
284 void (*sva_unbind)(struct iommu_sva *handle);
285 int (*sva_get_pasid)(struct iommu_sva *handle);
286
254 unsigned long pgsize_bitmap; 287 unsigned long pgsize_bitmap;
255}; 288};
256 289
@@ -417,6 +450,14 @@ struct iommu_fwspec {
417 u32 ids[1]; 450 u32 ids[1];
418}; 451};
419 452
453/**
454 * struct iommu_sva - handle to a device-mm bond
455 */
456struct iommu_sva {
457 struct device *dev;
458 const struct iommu_sva_ops *ops;
459};
460
420int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 461int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
421 const struct iommu_ops *ops); 462 const struct iommu_ops *ops);
422void iommu_fwspec_free(struct device *dev); 463void iommu_fwspec_free(struct device *dev);
@@ -445,6 +486,14 @@ int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
445void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); 486void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
446int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); 487int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
447 488
489struct iommu_sva *iommu_sva_bind_device(struct device *dev,
490 struct mm_struct *mm,
491 void *drvdata);
492void iommu_sva_unbind_device(struct iommu_sva *handle);
493int iommu_sva_set_ops(struct iommu_sva *handle,
494 const struct iommu_sva_ops *ops);
495int iommu_sva_get_pasid(struct iommu_sva *handle);
496
448#else /* CONFIG_IOMMU_API */ 497#else /* CONFIG_IOMMU_API */
449 498
450struct iommu_ops {}; 499struct iommu_ops {};
@@ -770,6 +819,27 @@ iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
770 return -ENODEV; 819 return -ENODEV;
771} 820}
772 821
822static inline struct iommu_sva *
823iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
824{
825 return NULL;
826}
827
828static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
829{
830}
831
832static inline int iommu_sva_set_ops(struct iommu_sva *handle,
833 const struct iommu_sva_ops *ops)
834{
835 return -EINVAL;
836}
837
838static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
839{
840 return IOMMU_PASID_INVALID;
841}
842
773#endif /* CONFIG_IOMMU_API */ 843#endif /* CONFIG_IOMMU_API */
774 844
775#ifdef CONFIG_IOMMU_DEBUGFS 845#ifdef CONFIG_IOMMU_DEBUGFS