author		Joerg Roedel <jroedel@suse.de>	2017-01-30 09:58:47 -0500
committer	Joerg Roedel <jroedel@suse.de>	2017-01-30 09:58:47 -0500
commit		93fa6cf60aad833e7572a61f98b2d0aa6f67de40 (patch)
tree		3e64532eec9aebf95827854b9a79d12676d49a3c
parent		566cf877a1fcb6d6dc0126b076aad062054c2637 (diff)
parent		5018c8d5ef0c172592eb98cf10e253d47b544ba8 (diff)

Merge branch 'iommu/guest-msi' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/core
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-iommu_groups |  12
-rw-r--r--  drivers/iommu/amd_iommu.c                           |  54
-rw-r--r--  drivers/iommu/arm-smmu-v3.c                         |  30
-rw-r--r--  drivers/iommu/arm-smmu.c                            |  30
-rw-r--r--  drivers/iommu/dma-iommu.c                           | 119
-rw-r--r--  drivers/iommu/intel-iommu.c                         |  92
-rw-r--r--  drivers/iommu/iommu.c                               | 177
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c                    |   1
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c                     |  37
-rw-r--r--  include/linux/dma-iommu.h                           |   6
-rw-r--r--  include/linux/iommu.h                               |  46
-rw-r--r--  include/linux/irqdomain.h                           |  36
-rw-r--r--  kernel/irq/irqdomain.c                              |  39
-rw-r--r--  kernel/irq/msi.c                                    |   4

14 files changed, 590 insertions(+), 93 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index 9b31556cfdda..35c64e00b35c 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -12,3 +12,15 @@ Description:    /sys/kernel/iommu_groups/ contains a number of sub-
 		file if the IOMMU driver has chosen to register a more
 		common name for the group.
 Users:
+
+What:		/sys/kernel/iommu_groups/reserved_regions
+Date:		January 2017
+KernelVersion:	v4.11
+Contact:	Eric Auger <eric.auger@redhat.com>
+Description:	/sys/kernel/iommu_groups/reserved_regions lists IOVA
+		regions that are reserved. Not all reserved regions are
+		necessarily listed. This is typically used to report
+		direct-mapped, MSI and non-mappable regions. Each
+		region is described on a single line: the first field is
+		the base IOVA, the second is the end IOVA and the third
+		field describes the type of the region.
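
For reference, the attribute is instantiated once per group (see the iommu_group_alloc() hunk in drivers/iommu/iommu.c below), and reading it yields one line per merged region in the format described above. A hypothetical example, not taken from any particular machine:

	0x0000000000000000 0x0000000000ffffff direct
	0x0000000008000000 0x00000000080fffff msi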
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3ef0f42984f2..d109e41204e8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3161,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 	return false;
 }
 
-static void amd_iommu_get_dm_regions(struct device *dev,
-				     struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+				       struct list_head *head)
 {
+	struct iommu_resv_region *region;
 	struct unity_map_entry *entry;
 	int devid;
 
@@ -3172,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-		struct iommu_dm_region *region;
+		size_t length;
+		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
-		region = kzalloc(sizeof(*region), GFP_KERNEL);
+		length = entry->address_end - entry->address_start;
+		if (entry->prot & IOMMU_PROT_IR)
+			prot |= IOMMU_READ;
+		if (entry->prot & IOMMU_PROT_IW)
+			prot |= IOMMU_WRITE;
+
+		region = iommu_alloc_resv_region(entry->address_start,
+						 length, prot,
+						 IOMMU_RESV_DIRECT);
 		if (!region) {
 			pr_err("Out of memory allocating dm-regions for %s\n",
 			       dev_name(dev));
 			return;
 		}
-
-		region->start = entry->address_start;
-		region->length = entry->address_end - entry->address_start;
-		if (entry->prot & IOMMU_PROT_IR)
-			region->prot |= IOMMU_READ;
-		if (entry->prot & IOMMU_PROT_IW)
-			region->prot |= IOMMU_WRITE;
-
 		list_add_tail(&region->list, head);
 	}
+
+	region = iommu_alloc_resv_region(MSI_RANGE_START,
+					 MSI_RANGE_END - MSI_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
+
+	region = iommu_alloc_resv_region(HT_RANGE_START,
+					 HT_RANGE_END - HT_RANGE_START + 1,
+					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
 				     struct list_head *head)
 {
-	struct iommu_dm_region *entry, *next;
+	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list)
 		kfree(entry);
 }
 
-static void amd_iommu_apply_dm_region(struct device *dev,
-				      struct iommu_domain *domain,
-				      struct iommu_dm_region *region)
+static void amd_iommu_apply_resv_region(struct device *dev,
+					struct iommu_domain *domain,
+					struct iommu_resv_region *region)
 {
 	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
 	unsigned long start, end;
@@ -3230,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
 	.device_group = amd_iommu_device_group,
-	.get_dm_regions = amd_iommu_get_dm_regions,
-	.put_dm_regions = amd_iommu_put_dm_regions,
-	.apply_dm_region = amd_iommu_apply_dm_region,
+	.get_resv_regions = amd_iommu_get_resv_regions,
+	.put_resv_regions = amd_iommu_put_resv_regions,
+	.apply_resv_region = amd_iommu_apply_resv_region,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
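Assuming the range constants used above keep their usual definitions in amd_iommu_types.h (MSI_RANGE_START/MSI_RANGE_END of 0xfee00000/0xfeefffff and HT_RANGE_START/HT_RANGE_END of 0xfd00000000/0xffffffffff; quoted from memory, so treat the exact values as an assumption rather than authoritative), every device behind an AMD IOMMU now reports the MSI window and the HyperTransport window as non-mappable, on top of any per-device unity-mapped (direct) ranges:

	0x00000000fee00000 0x00000000feefffff reserved
	0x000000fd00000000 0x000000ffffffffff reserved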
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d6ec444a9d6..d9cf6cb8c6cc 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -412,6 +412,9 @@
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US	100
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -1372,8 +1375,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1883,6 +1884,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1898,6 +1922,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a60cded8a6ed..13d26009b8e0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -281,6 +281,9 @@ enum arm_smmu_s2cr_privcfg {
 
 #define FSYNR0_WNR			(1 << 4)
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static int force_stage;
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
@@ -1371,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		 * requests.
 		 */
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1549,6 +1550,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1564,6 +1588,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
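Both SMMU drivers expose the same fixed, software-chosen window, IOVA 0x8000000 through 0x80fffff (base plus length minus one), typed IOMMU_RESV_MSI rather than IOMMU_RESV_RESERVED: the window is not unusable, it is where MSI doorbells are expected to be mapped. Userspace, and VFIO in the hunk further down, can discover it through a group's reserved_regions file and hand the base address to iommu_get_msi_cookie().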
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d641cf45..de41ead6542a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
+enum iommu_dma_cookie_type {
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
+};
+
 struct iommu_dma_cookie {
-	struct iova_domain	iovad;
-	struct list_head	msi_page_list;
-	spinlock_t		msi_lock;
+	enum iommu_dma_cookie_type	type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain	iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t		msi_iova;
+	};
+	struct list_head		msi_page_list;
+	spinlock_t			msi_lock;
 };
 
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return cookie->iovad.granule;
+	return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return &cookie->iovad;
+	return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+	struct iommu_dma_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie) {
+		spin_lock_init(&cookie->msi_lock);
+		INIT_LIST_HEAD(&cookie->msi_page_list);
+		cookie->type = type;
+	}
+	return cookie;
 }
 
 int iommu_dma_init(void)
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
+	if (domain->iova_cookie)
+		return -EEXIST;
+
+	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+	if (!domain->iova_cookie)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
 	struct iommu_dma_cookie *cookie;
 
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 	if (!cookie)
 		return -ENOMEM;
 
-	spin_lock_init(&cookie->msi_lock);
-	INIT_LIST_HEAD(&cookie->msi_page_list);
+	cookie->msi_iova = base;
 	domain->iova_cookie = cookie;
 	return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!cookie)
 		return;
 
-	if (cookie->iovad.granule)
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = cookie_iovad(domain);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
 
-	if (!iovad)
-		return -ENODEV;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -EINVAL;
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+	size_t size = cookie_msi_granule(cookie);
 
-	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	msi_addr &= ~(phys_addr_t)(size - 1);
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
@@ -675,13 +740,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-	if (!iova)
-		goto out_free_page;
-
 	msi_page->phys = msi_addr;
-	msi_page->iova = iova_dma_addr(iovad, iova);
-	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+	if (iovad) {
+		iova = __alloc_iova(domain, size, dma_get_mask(dev));
+		if (!iova)
+			goto out_free_page;
+		msi_page->iova = iova_dma_addr(iovad, iova);
+	} else {
+		msi_page->iova = cookie->msi_iova;
+		cookie->msi_iova += size;
+	}
+
+	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
 		goto out_free_iova;
 
 	INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +759,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	__free_iova(iovad, iova);
+	if (iovad)
+		__free_iova(iovad, iova);
+	else
+		cookie->msi_iova -= size;
 out_free_page:
 	kfree(msi_page);
 	return NULL;
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 		msg->data = ~0U;
 	} else {
 		msg->address_hi = upper_32_bits(msi_page->iova);
-		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo &= cookie_msi_granule(cookie) - 1;
 		msg->address_lo += lower_32_bits(msi_page->iova);
 	}
 }
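For context, a minimal sketch of how a caller that manages its own unmanaged domain would use the new MSI-cookie path; the bus pointer and the 0x8000000 base are placeholders, and the base would normally come from an IOMMU_RESV_MSI region, as in the VFIO hunk later in this merge:

	/* Sketch only: error handling trimmed, values are placeholders. */
	struct iommu_domain *domain = iommu_domain_alloc(bus);

	if (domain && !iommu_get_msi_cookie(domain, 0x8000000)) {
		/*
		 * From here on, iommu_dma_map_msi_msg() can transparently
		 * remap MSI doorbells into the window at 0x8000000, one
		 * PAGE_SIZE granule per doorbell, handed out by the
		 * trivial linear msi_iova allocator.
		 */
	}

	/* Released again on the driver's domain_free path: */
	iommu_put_dma_cookie(domain);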
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8a185250ae5a..bce59a53c2a6 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
 	u64	end_address;		/* reserved end address */
 	struct dmar_dev_scope *devices;	/* target devices */
 	int	devices_cnt;		/* target device count */
+	struct iommu_resv_region *resv; /* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -4246,27 +4247,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
+	int prot = DMA_PTE_READ|DMA_PTE_WRITE;
 	struct dmar_rmrr_unit *rmrru;
+	size_t length;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
-		return -ENOMEM;
+		goto out;
 
 	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	length = rmrr->end_address - rmrr->base_address + 1;
+	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+					      IOMMU_RESV_DIRECT);
+	if (!rmrru->resv)
+		goto free_rmrru;
+
 	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
-	if (rmrru->devices_cnt && rmrru->devices == NULL) {
-		kfree(rmrru);
-		return -ENOMEM;
-	}
+	if (rmrru->devices_cnt && rmrru->devices == NULL)
+		goto free_all;
 
 	list_add(&rmrru->list, &dmar_rmrr_units);
 
 	return 0;
+free_all:
+	kfree(rmrru->resv);
+free_rmrru:
+	kfree(rmrru);
+out:
+	return -ENOMEM;
 }
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4480,6 +4494,7 @@ static void intel_iommu_free_dmars(void)
 	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
 		list_del(&rmrru->list);
 		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru->resv);
 		kfree(rmrru);
 	}
 
@@ -5203,6 +5218,45 @@ static void intel_iommu_remove_device(struct device *dev)
 	iommu_device_unlink(iommu->iommu_dev, dev);
 }
 
+static void intel_iommu_get_resv_regions(struct device *device,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *reg;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *i_dev;
+	int i;
+
+	rcu_read_lock();
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, i_dev) {
+			if (i_dev != device)
+				continue;
+
+			list_add_tail(&rmrr->resv->list, head);
+		}
+	}
+	rcu_read_unlock();
+
+	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+				      0, IOMMU_RESV_RESERVED);
+	if (!reg)
+		return;
+	list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list) {
+		if (entry->type == IOMMU_RESV_RESERVED)
+			kfree(entry);
+	}
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 #define MAX_NR_PASID_BITS (20)
 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
@@ -5333,19 +5387,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
 static const struct iommu_ops intel_iommu_ops = {
-	.capable	= intel_iommu_capable,
-	.domain_alloc	= intel_iommu_domain_alloc,
-	.domain_free	= intel_iommu_domain_free,
-	.attach_dev	= intel_iommu_attach_device,
-	.detach_dev	= intel_iommu_detach_device,
-	.map		= intel_iommu_map,
-	.unmap		= intel_iommu_unmap,
-	.map_sg		= default_iommu_map_sg,
-	.iova_to_phys	= intel_iommu_iova_to_phys,
-	.add_device	= intel_iommu_add_device,
-	.remove_device	= intel_iommu_remove_device,
-	.device_group	= pci_device_group,
-	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
+	.capable		= intel_iommu_capable,
+	.domain_alloc		= intel_iommu_domain_alloc,
+	.domain_free		= intel_iommu_domain_free,
+	.attach_dev		= intel_iommu_attach_device,
+	.detach_dev		= intel_iommu_detach_device,
+	.map			= intel_iommu_map,
+	.unmap			= intel_iommu_unmap,
+	.map_sg			= default_iommu_map_sg,
+	.iova_to_phys		= intel_iommu_iova_to_phys,
+	.add_device		= intel_iommu_add_device,
+	.remove_device		= intel_iommu_remove_device,
+	.get_resv_regions	= intel_iommu_get_resv_regions,
+	.put_resv_regions	= intel_iommu_put_resv_regions,
+	.device_group		= pci_device_group,
+	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
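Note the asymmetry between the two Intel callbacks: the RMRR-backed IOMMU_RESV_DIRECT entry is allocated once in dmar_parse_one_rmrr(), handed out to every matching device by get_resv_regions, and freed only in intel_iommu_free_dmars(), so intel_iommu_put_resv_regions() deliberately frees just the per-call IOMMU_RESV_RESERVED (IOAPIC) entry.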
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dbe7f653bb7c..f4a176e56e39 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
 			 const char *buf, size_t count);
 };
 
+static const char * const iommu_group_resv_type_string[] = {
+	[IOMMU_RESV_DIRECT]	= "direct",
+	[IOMMU_RESV_RESERVED]	= "reserved",
+	[IOMMU_RESV_MSI]	= "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
 	__ATTR(_name, _mode, _show, _store)
@@ -133,8 +139,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 	return sprintf(buf, "%s\n", group->name);
 }
 
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of a different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+				    struct list_head *regions)
+{
+	struct iommu_resv_region *region;
+	phys_addr_t start = new->start;
+	phys_addr_t end = new->start + new->length - 1;
+	struct list_head *pos = regions->next;
+
+	while (pos != regions) {
+		struct iommu_resv_region *entry =
+			list_entry(pos, struct iommu_resv_region, list);
+		phys_addr_t a = entry->start;
+		phys_addr_t b = entry->start + entry->length - 1;
+		int type = entry->type;
+
+		if (end < a) {
+			goto insert;
+		} else if (start > b) {
+			pos = pos->next;
+		} else if ((start >= a) && (end <= b)) {
+			if (new->type == type)
+				goto done;
+			else
+				pos = pos->next;
+		} else {
+			if (new->type == type) {
+				phys_addr_t new_start = min(a, start);
+				phys_addr_t new_end = max(b, end);
+
+				list_del(&entry->list);
+				entry->start = new_start;
+				entry->length = new_end - new_start + 1;
+				iommu_insert_resv_region(entry, regions);
+			} else {
+				pos = pos->next;
+			}
+		}
+	}
+insert:
+	region = iommu_alloc_resv_region(new->start, new->length,
+					 new->prot, new->type);
+	if (!region)
+		return -ENOMEM;
+
+	list_add_tail(&region->list, pos);
+done:
+	return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+				 struct list_head *group_resv_regions)
+{
+	struct iommu_resv_region *entry;
+	int ret = 0;
+
+	list_for_each_entry(entry, dev_resv_regions, list) {
+		ret = iommu_insert_resv_region(entry, group_resv_regions);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+				 struct list_head *head)
+{
+	struct iommu_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		struct list_head dev_resv_regions;
+
+		INIT_LIST_HEAD(&dev_resv_regions);
+		iommu_get_resv_regions(device->dev, &dev_resv_regions);
+		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+		iommu_put_resv_regions(device->dev, &dev_resv_regions);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+					     char *buf)
+{
+	struct iommu_resv_region *region, *next;
+	struct list_head group_resv_regions;
+	char *str = buf;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+
+	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+			       (long long int)region->start,
+			       (long long int)(region->start +
+						region->length - 1),
+			       iommu_group_resv_type_string[region->type]);
+		kfree(region);
+	}
+
+	return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+			iommu_group_show_resv_regions, NULL);
+
 static void iommu_group_release(struct kobject *kobj)
 {
 	struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +341,11 @@ struct iommu_group *iommu_group_alloc(void)
 	 */
 	kobject_put(&group->kobj);
 
+	ret = iommu_group_create_file(group,
+				      &iommu_group_attr_reserved_regions);
+	if (ret)
+		return ERR_PTR(ret);
+
 	pr_debug("Allocated group %d\n", group->id);
 
 	return group;
@@ -318,7 +452,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 					      struct device *dev)
 {
 	struct iommu_domain *domain = group->default_domain;
-	struct iommu_dm_region *entry;
+	struct iommu_resv_region *entry;
 	struct list_head mappings;
 	unsigned long pg_size;
 	int ret = 0;
@@ -331,18 +465,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
-	iommu_get_dm_regions(dev, &mappings);
+	iommu_get_resv_regions(dev, &mappings);
 
 	/* We need to consider overlapping regions for different devices */
 	list_for_each_entry(entry, &mappings, list) {
 		dma_addr_t start, end, addr;
 
-		if (domain->ops->apply_dm_region)
-			domain->ops->apply_dm_region(dev, domain, entry);
+		if (domain->ops->apply_resv_region)
+			domain->ops->apply_resv_region(dev, domain, entry);
 
 		start = ALIGN(entry->start, pg_size);
 		end = ALIGN(entry->start + entry->length, pg_size);
 
+		if (entry->type != IOMMU_RESV_DIRECT)
+			continue;
+
 		for (addr = start; addr < end; addr += pg_size) {
 			phys_addr_t phys_addr;
 
@@ -358,7 +495,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	}
 
 out:
-	iommu_put_dm_regions(dev, &mappings);
+	iommu_put_resv_regions(dev, &mappings);
 
 	return ret;
 }
@@ -1559,20 +1696,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->get_dm_regions)
-		ops->get_dm_regions(dev, list);
+	if (ops && ops->get_resv_regions)
+		ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->put_dm_regions)
-		ops->put_dm_regions(dev, list);
+	if (ops && ops->put_resv_regions)
+		ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+						  size_t length,
+						  int prot, int type)
+{
+	struct iommu_resv_region *region;
+
+	region = kzalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return NULL;
+
+	INIT_LIST_HEAD(&region->list);
+	region->start = start;
+	region->length = length;
+	region->prot = prot;
+	region->type = type;
+	return region;
 }
 
 /* Request that a device is direct mapped by the IOMMU */
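A worked example of the iommu_insert_resv_region() merge semantics (addresses hypothetical): inserting a direct region covering 0x1000-0x3fff and then a second direct region covering 0x2000-0x5fff produces a single merged direct entry 0x1000-0x5fff, because overlapping regions of the same type are coalesced; subsequently inserting an MSI region 0x3000-0x3fff leaves it as a separate entry, since overlapping regions of different types are kept apart. This is what lets iommu_get_group_resv_regions() present one de-duplicated list for a multi-device group.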
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 69b040f47d56..9d4fefc59827 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1642,6 +1642,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 
 	inner_domain->parent = its_parent;
 	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
 	info->ops = &its_msi_domain_ops;
 	info->data = its;
 	inner_domain->host_data = info;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b3cc33fa6d26..0f353f519574 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,8 @@
 #include <linux/workqueue.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>
 
 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
 	return NULL;
 }
 
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+				    phys_addr_t *base)
+{
+	struct list_head group_resv_regions;
+	struct iommu_resv_region *region, *next;
+	bool ret = false;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+	list_for_each_entry(region, &group_resv_regions, list) {
+		if (region->type & IOMMU_RESV_MSI) {
+			*base = region->start;
+			ret = true;
+			goto out;
+		}
+	}
+out:
+	list_for_each_entry_safe(region, next, &group_resv_regions, list)
+		kfree(region);
+	return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	struct vfio_domain *domain, *d;
 	struct bus_type *bus = NULL, *mdev_bus;
 	int ret;
+	bool resv_msi, msi_remap;
+	phys_addr_t resv_msi_base;
 
 	mutex_lock(&iommu->lock);
 
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_domain;
 
+	resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
 	INIT_LIST_HEAD(&domain->group_list);
 	list_add(&group->next, &domain->group_list);
 
-	if (!allow_unsafe_interrupts &&
-	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+	msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+				iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+	if (!allow_unsafe_interrupts && !msi_remap) {
 		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
 		       __func__);
 		ret = -EPERM;
@@ -1302,6 +1332,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_detach;
 
+	if (resv_msi && iommu_get_msi_cookie(domain->domain, resv_msi_base))
+		goto out_detach;
+
 	list_add(&domain->next, &iommu->domain_list);
 
 	mutex_unlock(&iommu->lock);
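The rewritten check encodes where MSI isolation lives on each architecture: on x86 interrupt remapping is a property of the IOMMU itself, still reported through IOMMU_CAP_INTR_REMAP, while on ARM it is a property of the MSI controller. So when the group exposes an IOMMU_RESV_MSI region, VFIO instead asks the irqdomain layer, via irq_domain_check_msi_remap(), whether every registered MSI domain remaps interrupts before it considers device assignment safe.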
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7e3839..28df844a23b6 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 	return -ENODEV;
 }
 
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0ff5111f6959..bec3730dc009 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,25 @@ enum iommu_attr {
 	DOMAIN_ATTR_MAX,
 };
 
+/* These are the possible reserved region types */
+#define IOMMU_RESV_DIRECT	(1 << 0)
+#define IOMMU_RESV_RESERVED	(1 << 1)
+#define IOMMU_RESV_MSI		(1 << 2)
+
 /**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
  */
-struct iommu_dm_region {
+struct iommu_resv_region {
 	struct list_head	list;
 	phys_addr_t		start;
 	size_t			length;
 	int			prot;
+	int			type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -150,9 +157,9 @@ struct iommu_dm_region {
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +191,12 @@ struct iommu_ops {
 	int (*domain_set_attr)(struct iommu_domain *domain,
 			       enum iommu_attr attr, void *data);
 
-	/* Request/Free a list of direct mapping requirements for a device */
-	void (*get_dm_regions)(struct device *dev, struct list_head *list);
-	void (*put_dm_regions)(struct device *dev, struct list_head *list);
-	void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
-				struct iommu_dm_region *region);
+	/* Request/Free a list of reserved regions for a device */
+	void (*get_resv_regions)(struct device *dev, struct list_head *list);
+	void (*put_resv_regions)(struct device *dev, struct list_head *list);
+	void (*apply_resv_region)(struct device *dev,
+				  struct iommu_domain *domain,
+				  struct iommu_resv_region *region);
 
 	/* Window handling functions */
 	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -233,9 +241,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+					struct list_head *head);
 
 extern int iommu_attach_group(struct iommu_domain *domain,
 			      struct iommu_group *group);
@@ -443,16 +455,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
-static inline void iommu_get_dm_regions(struct device *dev,
-					struct list_head *list)
+static inline void iommu_get_resv_regions(struct device *dev,
+					  struct list_head *list)
 {
 }
 
-static inline void iommu_put_dm_regions(struct device *dev,
-					struct list_head *list)
+static inline void iommu_put_resv_regions(struct device *dev,
+					  struct list_head *list)
 {
 }
 
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+					       struct list_head *head)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
 	return -ENODEV;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ffb84604c1de..188eced6813e 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
 	/* Irq domain is an IPI domain with single virq */
 	IRQ_DOMAIN_FLAG_IPI_SINGLE	= (1 << 3),
 
+	/* Irq domain implements MSIs */
+	IRQ_DOMAIN_FLAG_MSI		= (1 << 4),
+
+	/* Irq domain implements MSI remapping */
+	IRQ_DOMAIN_FLAG_MSI_REMAP	= (1 << 5),
+
 	/*
 	 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
 	 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 void *host_data);
 extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 						   enum irq_domain_bus_token bus_token);
+extern bool irq_domain_check_msi_remap(void);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
 				  irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+}
+
+extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+
 #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 static inline void irq_domain_activate_irq(struct irq_data *data) { }
 static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return false;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool
+irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #else /* CONFIG_IRQ_DOMAIN */
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c0a0ae43521..80c4f9312187 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -278,6 +278,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 
 /**
+ * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
+ * IRQ remapping
+ *
+ * Return: false if any MSI irq domain does not support IRQ remapping,
+ * true otherwise (including when there is no MSI irq domain)
+ */
+bool irq_domain_check_msi_remap(void)
+{
+	struct irq_domain *h;
+	bool ret = true;
+
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(h, &irq_domain_list, link) {
+		if (irq_domain_is_msi(h) &&
+		    !irq_domain_hierarchical_is_msi_remap(h)) {
+			ret = false;
+			break;
+		}
+	}
+	mutex_unlock(&irq_domain_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
+
+/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
@@ -1392,6 +1417,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
 	if (domain->ops->alloc)
 		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
 }
+
+/**
+ * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
+ * parent has MSI remapping support
+ * @domain: domain pointer
+ */
+bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+	for (; domain; domain = domain->parent) {
+		if (irq_domain_is_msi_remap(domain))
+			return true;
+	}
+	return false;
+}
 #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 /**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
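An MSI controller driver opts in by flagging its irq domain, exactly as the GICv3 ITS hunk earlier in this merge does; a consumer then only needs the system-wide query. A minimal sketch, where its_domain stands for any MSI irq domain:

	/* Provider: declare that this MSI domain isolates/remaps MSIs. */
	its_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;

	/* Consumer (e.g. VFIO): only safe if *all* MSI domains remap. */
	if (!irq_domain_check_msi_remap())
		return -EPERM;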
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index ee230063f033..ddc2f5427f75 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		msi_domain_update_chip_ops(info);
 
-	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
-					   &msi_domain_ops, info);
+	return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
+					   fwnode, &msi_domain_ops, info);
 }
 
 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,