path: root/include/linux/iommu.h
Diffstat (limited to 'include/linux/iommu.h')
 -rw-r--r--  include/linux/iommu.h  105
 1 file changed, 105 insertions(+), 0 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e552c3b63f6f..fdc355ccc570 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <uapi/linux/iommu.h>
 
 #define IOMMU_READ	(1 << 0)
 #define IOMMU_WRITE	(1 << 1)
@@ -29,6 +30,12 @@
  * if the IOMMU page table format is equivalent.
  */
 #define IOMMU_PRIV	(1 << 5)
+/*
+ * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
+ * to set correct cacheability attributes to use an outer level of cache -
+ * last level cache, aka system cache.
+ */
+#define IOMMU_QCOM_SYS_CACHE	(1 << 6)
 
 struct iommu_ops;
 struct iommu_group;
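
As a rough illustration (not part of this patch), a DMA master driver on an affected Qualcomm SoC could OR the new flag into the protection bits it passes to iommu_map(); "domain", "iova", "phys" and "dev" below are placeholders for the caller's own state.

	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_QCOM_SYS_CACHE;
	int ret;

	/* Map one page so the master can allocate it in the system cache. */
	ret = iommu_map(domain, iova, phys, SZ_4K, prot);
	if (ret)
		dev_err(dev, "IOMMU map failed: %d\n", ret);
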
@@ -37,6 +44,7 @@ struct device;
 struct iommu_domain;
 struct notifier_block;
 struct iommu_sva;
+struct iommu_fault_event;
 
 /* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
@@ -46,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
 typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
 				       void *);
+typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
 
 struct iommu_domain_geometry {
 	dma_addr_t aperture_start; /* First address that can be mapped */
@@ -123,6 +132,12 @@ enum iommu_attr {
 enum iommu_resv_type {
 	/* Memory regions which must be mapped 1:1 at all times */
 	IOMMU_RESV_DIRECT,
+	/*
+	 * Memory regions which are advertised to be 1:1 but are
+	 * commonly considered relaxable in some conditions,
+	 * for instance in device assignment use case (USB, Graphics)
+	 */
+	IOMMU_RESV_DIRECT_RELAXABLE,
 	/* Arbitrary "never map this or give it to a device" address ranges */
 	IOMMU_RESV_RESERVED,
 	/* Hardware MSI region (untranslated) */
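
For illustration only: an IOMMU driver's get_resv_regions() callback could advertise such a region by allocating it with the new type via iommu_alloc_resv_region(), declared further down in this header. The address range is made up; "head" is the list the callback was asked to fill.

	struct iommu_resv_region *region;

	/* Hypothetical relaxable identity-mapped range, e.g. legacy USB/IGD memory. */
	region = iommu_alloc_resv_region(0xa0000, 0x20000, IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_DIRECT_RELAXABLE);
	if (!region)
		return;
	list_add_tail(&region->list, head);
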
@@ -212,6 +227,7 @@ struct iommu_sva_ops {
  * @sva_bind: Bind process address space to device
  * @sva_unbind: Unbind process address space from device
  * @sva_get_pasid: Get PASID associated to a SVA handle
+ * @page_response: handle page request response
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  */
 struct iommu_ops {
@@ -272,6 +288,10 @@ struct iommu_ops {
 	void (*sva_unbind)(struct iommu_sva *handle);
 	int (*sva_get_pasid)(struct iommu_sva *handle);
 
+	int (*page_response)(struct device *dev,
+			     struct iommu_fault_event *evt,
+			     struct iommu_page_response *msg);
+
 	unsigned long pgsize_bitmap;
 };
 
@@ -289,6 +309,48 @@ struct iommu_device {
 	struct device *dev;
 };
 
+/**
+ * struct iommu_fault_event - Generic fault event
+ *
+ * Can represent recoverable faults such as a page requests or
+ * unrecoverable faults such as DMA or IRQ remapping faults.
+ *
+ * @fault: fault descriptor
+ * @list: pending fault event list, used for tracking responses
+ */
+struct iommu_fault_event {
+	struct iommu_fault fault;
+	struct list_head list;
+};
+
+/**
+ * struct iommu_fault_param - per-device IOMMU fault data
+ * @handler: Callback function to handle IOMMU faults at device level
+ * @data: handler private data
+ * @faults: holds the pending faults which needs response
+ * @lock: protect pending faults list
+ */
+struct iommu_fault_param {
+	iommu_dev_fault_handler_t handler;
+	void *data;
+	struct list_head faults;
+	struct mutex lock;
+};
+
+/**
+ * struct iommu_param - collection of per-device IOMMU data
+ *
+ * @fault_param: IOMMU detected device fault reporting data
+ *
+ * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+ *	struct iommu_group	*iommu_group;
+ *	struct iommu_fwspec	*iommu_fwspec;
+ */
+struct iommu_param {
+	struct mutex lock;
+	struct iommu_fault_param *fault_param;
+};
+
 int iommu_device_register(struct iommu_device *iommu);
 void iommu_device_unregister(struct iommu_device *iommu);
 int iommu_device_sysfs_add(struct iommu_device *iommu,
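
A hedged sketch of how an IOMMU driver might wrap a recoverable fault in the new iommu_fault_event and hand it to iommu_report_device_fault() (declared later in this diff). The struct iommu_fault layout comes from the uapi header now included at the top of this file; the field and constant names below are assumptions about that companion header, not guaranteed by this hunk. "dev", "pasid", "grpid" and "iova" are placeholders.

	struct iommu_fault_event evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags	= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					  IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
				.pasid	= pasid,
				.grpid	= grpid,
				.perm	= IOMMU_FAULT_PERM_READ,
				.addr	= iova,
			},
		},
	};

	/* Queue the page request; the core tracks it until a response arrives. */
	ret = iommu_report_device_fault(dev, &evt);
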
@@ -350,6 +412,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
 			enum iommu_resv_type type);
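
A minimal sketch of the new counterpart to iommu_request_dm_for_dev(): a caller that wants the device moved back to a DMA-API (translated) default domain could do something like the following, where "dev" is the struct device being reconfigured.

	if (iommu_request_dma_domain_for_dev(dev))
		dev_warn(dev, "could not switch to a DMA-API default domain\n");
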
@@ -378,6 +441,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
 				 struct notifier_block *nb);
 extern int iommu_group_unregister_notifier(struct iommu_group *group,
 				  struct notifier_block *nb);
+extern int iommu_register_device_fault_handler(struct device *dev,
+					iommu_dev_fault_handler_t handler,
+					void *data);
+
+extern int iommu_unregister_device_fault_handler(struct device *dev);
+
+extern int iommu_report_device_fault(struct device *dev,
+				     struct iommu_fault_event *evt);
+extern int iommu_page_response(struct device *dev,
+			       struct iommu_page_response *msg);
+
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
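
A hedged consumer-side sketch of the registration API added above: a driver registers a per-device fault handler and completes page requests with iommu_page_response(). The struct iommu_page_response field and constant names follow the uapi header this file now includes and should be read as assumptions rather than guarantees of this hunk; "my_dev_fault_handler" is a hypothetical name.

	static int my_dev_fault_handler(struct iommu_fault *fault, void *data)
	{
		struct device *dev = data;
		struct iommu_page_response resp = {
			.version = IOMMU_PAGE_RESP_VERSION_1,
			.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
			.pasid	 = fault->prm.pasid,
			.grpid	 = fault->prm.grpid,
			.code	 = IOMMU_PAGE_RESP_SUCCESS,
		};

		if (fault->type != IOMMU_FAULT_PAGE_REQ)
			return -EOPNOTSUPP;

		/* ... service the page request, e.g. fault in the page ... */

		return iommu_page_response(dev, &resp);
	}

	/* Registration, e.g. from the driver's probe path: */
	ret = iommu_register_device_fault_handler(dev, my_dev_fault_handler, dev);
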
@@ -492,6 +566,7 @@ struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
 struct iommu_device {};
+struct iommu_fault_param {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -614,6 +689,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev)
 	return -ENODEV;
 }
 
+static inline int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_attach_group(struct iommu_domain *domain,
 				     struct iommu_group *group)
 {
@@ -685,6 +765,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
 	return 0;
 }
 
+static inline
+int iommu_register_device_fault_handler(struct device *dev,
+					iommu_dev_fault_handler_t handler,
+					void *data)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_unregister_device_fault_handler(struct device *dev)
+{
+	return 0;
+}
+
+static inline
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_page_response(struct device *dev,
+				      struct iommu_page_response *msg)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_group_id(struct iommu_group *group)
 {
 	return -ENODEV;