diff options
author | Eric Auger <eric.auger@redhat.com> | 2017-01-19 15:57:54 -0500 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2017-01-23 06:48:17 -0500 |
commit | 4397f32c03a757acb3e44d268c20233fa1758ed9 (patch) | |
tree | f74597d0c232030c10b1ca4b3bcd92bdea6f5ba9 | |
parent | 0659b8dc45a6b13a4fec73b2ebb51c96b41974c4 (diff) |
iommu/amd: Declare MSI and HT regions as reserved IOVA regions
This patch registers the MSI and HT regions as non-mappable
reserved regions. They will be exposed in the iommu-group sysfs.
For direct-mapped regions, let's also use iommu_alloc_resv_region().
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r-- | drivers/iommu/amd_iommu.c | 37 |
1 files changed, 26 insertions, 11 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 5f7ea4faa505..d109e41204e8 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -3164,6 +3164,7 @@ static bool amd_iommu_capable(enum iommu_cap cap) | |||
3164 | static void amd_iommu_get_resv_regions(struct device *dev, | 3164 | static void amd_iommu_get_resv_regions(struct device *dev, |
3165 | struct list_head *head) | 3165 | struct list_head *head) |
3166 | { | 3166 | { |
3167 | struct iommu_resv_region *region; | ||
3167 | struct unity_map_entry *entry; | 3168 | struct unity_map_entry *entry; |
3168 | int devid; | 3169 | int devid; |
3169 | 3170 | ||
@@ -3172,28 +3173,42 @@ static void amd_iommu_get_resv_regions(struct device *dev, | |||
3172 | return; | 3173 | return; |
3173 | 3174 | ||
3174 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | 3175 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { |
3175 | struct iommu_resv_region *region; | 3176 | size_t length; |
3177 | int prot = 0; | ||
3176 | 3178 | ||
3177 | if (devid < entry->devid_start || devid > entry->devid_end) | 3179 | if (devid < entry->devid_start || devid > entry->devid_end) |
3178 | continue; | 3180 | continue; |
3179 | 3181 | ||
3180 | region = kzalloc(sizeof(*region), GFP_KERNEL); | 3182 | length = entry->address_end - entry->address_start; |
3183 | if (entry->prot & IOMMU_PROT_IR) | ||
3184 | prot |= IOMMU_READ; | ||
3185 | if (entry->prot & IOMMU_PROT_IW) | ||
3186 | prot |= IOMMU_WRITE; | ||
3187 | |||
3188 | region = iommu_alloc_resv_region(entry->address_start, | ||
3189 | length, prot, | ||
3190 | IOMMU_RESV_DIRECT); | ||
3181 | if (!region) { | 3191 | if (!region) { |
3182 | pr_err("Out of memory allocating dm-regions for %s\n", | 3192 | pr_err("Out of memory allocating dm-regions for %s\n", |
3183 | dev_name(dev)); | 3193 | dev_name(dev)); |
3184 | return; | 3194 | return; |
3185 | } | 3195 | } |
3186 | |||
3187 | region->start = entry->address_start; | ||
3188 | region->length = entry->address_end - entry->address_start; | ||
3189 | region->type = IOMMU_RESV_DIRECT; | ||
3190 | if (entry->prot & IOMMU_PROT_IR) | ||
3191 | region->prot |= IOMMU_READ; | ||
3192 | if (entry->prot & IOMMU_PROT_IW) | ||
3193 | region->prot |= IOMMU_WRITE; | ||
3194 | |||
3195 | list_add_tail(®ion->list, head); | 3196 | list_add_tail(®ion->list, head); |
3196 | } | 3197 | } |
3198 | |||
3199 | region = iommu_alloc_resv_region(MSI_RANGE_START, | ||
3200 | MSI_RANGE_END - MSI_RANGE_START + 1, | ||
3201 | 0, IOMMU_RESV_RESERVED); | ||
3202 | if (!region) | ||
3203 | return; | ||
3204 | list_add_tail(®ion->list, head); | ||
3205 | |||
3206 | region = iommu_alloc_resv_region(HT_RANGE_START, | ||
3207 | HT_RANGE_END - HT_RANGE_START + 1, | ||
3208 | 0, IOMMU_RESV_RESERVED); | ||
3209 | if (!region) | ||
3210 | return; | ||
3211 | list_add_tail(®ion->list, head); | ||
3197 | } | 3212 | } |
3198 | 3213 | ||
3199 | static void amd_iommu_put_resv_regions(struct device *dev, | 3214 | static void amd_iommu_put_resv_regions(struct device *dev, |