diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-20 19:42:43 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-20 19:42:43 -0500 |
commit | ebb4949eb32ff500602f960525592fc4e614c5a7 (patch) | |
tree | ecbb44a9956ee723b0f20863208bafc812b408a7 | |
parent | 937b5b5ddd2f685b4962ec19502e641bb5741c12 (diff) | |
parent | 8d2932dd0634ebeb0a42df896976772bdb569bfe (diff) |
Merge tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
- KVM PCIe/MSI passthrough support on ARM/ARM64
- introduction of a core representation for individual hardware iommus
- support for IOMMU privileged mappings as supported by some ARM IOMMUs
- 16-bit SID support for ARM-SMMUv2
- stream table optimization for ARM-SMMUv3
- various fixes and other small improvements
* tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (61 commits)
vfio/type1: Fix error return code in vfio_iommu_type1_attach_group()
iommu: Remove iommu_register_instance interface
iommu/exynos: Make use of iommu_device_register interface
iommu/mediatek: Make use of iommu_device_register interface
iommu/msm: Make use of iommu_device_register interface
iommu/arm-smmu: Make use of the iommu_device_register interface
iommu: Add iommu_device_set_fwnode() interface
iommu: Make iommu_device_link/unlink take a struct iommu_device
iommu: Add sysfs bindings for struct iommu_device
iommu: Introduce new 'struct iommu_device'
iommu: Rename struct iommu_device
iommu: Rename iommu_get_instance()
iommu: Fix static checker warning in iommu_insert_device_resv_regions
iommu: Avoid unnecessary assignment of dev->iommu_fwspec
iommu/mediatek: Remove bogus 'select' statements
iommu/dma: Remove bogus dma_supported() implementation
iommu/ipmmu-vmsa: Restrict IOMMU Domain Geometry to 32-bit address space
iommu/vt-d: Don't over-free page table directories
iommu/vt-d: Tylersburg isoch identity map check is done too late.
iommu/vt-d: Fix some macros that are incorrectly specified in intel-iommu
...
37 files changed, 1189 insertions, 390 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups index 9b31556cfdda..35c64e00b35c 100644 --- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups +++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups | |||
@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub- | |||
12 | file if the IOMMU driver has chosen to register a more | 12 | file if the IOMMU driver has chosen to register a more |
13 | common name for the group. | 13 | common name for the group. |
14 | Users: | 14 | Users: |
15 | |||
16 | What: /sys/kernel/iommu_groups/reserved_regions | ||
17 | Date: January 2017 | ||
18 | KernelVersion: v4.11 | ||
19 | Contact: Eric Auger <eric.auger@redhat.com> | ||
20 | Description: /sys/kernel/iommu_groups/reserved_regions list IOVA | ||
21 | regions that are reserved. Not necessarily all | ||
22 | reserved regions are listed. This is typically used to | ||
23 | output direct-mapped, MSI, non mappable regions. Each | ||
24 | region is described on a single line: the 1st field is | ||
25 | the base IOVA, the second is the end IOVA and the third | ||
26 | field describes the type of the region. | ||
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt index 98bf7ac29aad..44c6bc496eee 100644 --- a/Documentation/DMA-attributes.txt +++ b/Documentation/DMA-attributes.txt | |||
@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls | |||
143 | where allocation failures are not a problem, and shouldn't bother the logs. | 143 | where allocation failures are not a problem, and shouldn't bother the logs. |
144 | 144 | ||
145 | NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC. | 145 | NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC. |
146 | |||
147 | DMA_ATTR_PRIVILEGED | ||
148 | ------------------------------ | ||
149 | |||
150 | Some advanced peripherals such as remote processors and GPUs perform | ||
151 | accesses to DMA buffers in both privileged "supervisor" and unprivileged | ||
152 | "user" modes. This attribute is used to indicate to the DMA-mapping | ||
153 | subsystem that the buffer is fully accessible at the elevated privilege | ||
154 | level (and ideally inaccessible or at least read-only at the | ||
155 | lesser-privileged levels). | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ab7710002ba6..82d3e79ec82b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init); | |||
1171 | 1171 | ||
1172 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | 1172 | #ifdef CONFIG_ARM_DMA_USE_IOMMU |
1173 | 1173 | ||
1174 | static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) | ||
1175 | { | ||
1176 | int prot = 0; | ||
1177 | |||
1178 | if (attrs & DMA_ATTR_PRIVILEGED) | ||
1179 | prot |= IOMMU_PRIV; | ||
1180 | |||
1181 | switch (dir) { | ||
1182 | case DMA_BIDIRECTIONAL: | ||
1183 | return prot | IOMMU_READ | IOMMU_WRITE; | ||
1184 | case DMA_TO_DEVICE: | ||
1185 | return prot | IOMMU_READ; | ||
1186 | case DMA_FROM_DEVICE: | ||
1187 | return prot | IOMMU_WRITE; | ||
1188 | default: | ||
1189 | return prot; | ||
1190 | } | ||
1191 | } | ||
1192 | |||
1174 | /* IOMMU */ | 1193 | /* IOMMU */ |
1175 | 1194 | ||
1176 | static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); | 1195 | static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); |
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, | |||
1394 | * Create a mapping in device IO address space for specified pages | 1413 | * Create a mapping in device IO address space for specified pages |
1395 | */ | 1414 | */ |
1396 | static dma_addr_t | 1415 | static dma_addr_t |
1397 | __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | 1416 | __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, |
1417 | unsigned long attrs) | ||
1398 | { | 1418 | { |
1399 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); | 1419 | struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); |
1400 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 1420 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | |||
1419 | 1439 | ||
1420 | len = (j - i) << PAGE_SHIFT; | 1440 | len = (j - i) << PAGE_SHIFT; |
1421 | ret = iommu_map(mapping->domain, iova, phys, len, | 1441 | ret = iommu_map(mapping->domain, iova, phys, len, |
1422 | IOMMU_READ|IOMMU_WRITE); | 1442 | __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs)); |
1423 | if (ret < 0) | 1443 | if (ret < 0) |
1424 | goto fail; | 1444 | goto fail; |
1425 | iova += len; | 1445 | iova += len; |
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) | |||
1476 | } | 1496 | } |
1477 | 1497 | ||
1478 | static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, | 1498 | static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, |
1479 | dma_addr_t *handle, int coherent_flag) | 1499 | dma_addr_t *handle, int coherent_flag, |
1500 | unsigned long attrs) | ||
1480 | { | 1501 | { |
1481 | struct page *page; | 1502 | struct page *page; |
1482 | void *addr; | 1503 | void *addr; |
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, | |||
1488 | if (!addr) | 1509 | if (!addr) |
1489 | return NULL; | 1510 | return NULL; |
1490 | 1511 | ||
1491 | *handle = __iommu_create_mapping(dev, &page, size); | 1512 | *handle = __iommu_create_mapping(dev, &page, size, attrs); |
1492 | if (*handle == DMA_ERROR_CODE) | 1513 | if (*handle == DMA_ERROR_CODE) |
1493 | goto err_mapping; | 1514 | goto err_mapping; |
1494 | 1515 | ||
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | |||
1522 | 1543 | ||
1523 | if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) | 1544 | if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) |
1524 | return __iommu_alloc_simple(dev, size, gfp, handle, | 1545 | return __iommu_alloc_simple(dev, size, gfp, handle, |
1525 | coherent_flag); | 1546 | coherent_flag, attrs); |
1526 | 1547 | ||
1527 | /* | 1548 | /* |
1528 | * Following is a work-around (a.k.a. hack) to prevent pages | 1549 | * Following is a work-around (a.k.a. hack) to prevent pages |
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, | |||
1537 | if (!pages) | 1558 | if (!pages) |
1538 | return NULL; | 1559 | return NULL; |
1539 | 1560 | ||
1540 | *handle = __iommu_create_mapping(dev, pages, size); | 1561 | *handle = __iommu_create_mapping(dev, pages, size, attrs); |
1541 | if (*handle == DMA_ERROR_CODE) | 1562 | if (*handle == DMA_ERROR_CODE) |
1542 | goto err_buffer; | 1563 | goto err_buffer; |
1543 | 1564 | ||
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
1672 | GFP_KERNEL); | 1693 | GFP_KERNEL); |
1673 | } | 1694 | } |
1674 | 1695 | ||
1675 | static int __dma_direction_to_prot(enum dma_data_direction dir) | ||
1676 | { | ||
1677 | int prot; | ||
1678 | |||
1679 | switch (dir) { | ||
1680 | case DMA_BIDIRECTIONAL: | ||
1681 | prot = IOMMU_READ | IOMMU_WRITE; | ||
1682 | break; | ||
1683 | case DMA_TO_DEVICE: | ||
1684 | prot = IOMMU_READ; | ||
1685 | break; | ||
1686 | case DMA_FROM_DEVICE: | ||
1687 | prot = IOMMU_WRITE; | ||
1688 | break; | ||
1689 | default: | ||
1690 | prot = 0; | ||
1691 | } | ||
1692 | |||
1693 | return prot; | ||
1694 | } | ||
1695 | |||
1696 | /* | 1696 | /* |
1697 | * Map a part of the scatter-gather list into contiguous io address space | 1697 | * Map a part of the scatter-gather list into contiguous io address space |
1698 | */ | 1698 | */ |
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
1722 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) | 1722 | if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) |
1723 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 1723 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); |
1724 | 1724 | ||
1725 | prot = __dma_direction_to_prot(dir); | 1725 | prot = __dma_info_to_prot(dir, attrs); |
1726 | 1726 | ||
1727 | ret = iommu_map(mapping->domain, iova, phys, len, prot); | 1727 | ret = iommu_map(mapping->domain, iova, phys, len, prot); |
1728 | if (ret < 0) | 1728 | if (ret < 0) |
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p | |||
1930 | if (dma_addr == DMA_ERROR_CODE) | 1930 | if (dma_addr == DMA_ERROR_CODE) |
1931 | return dma_addr; | 1931 | return dma_addr; |
1932 | 1932 | ||
1933 | prot = __dma_direction_to_prot(dir); | 1933 | prot = __dma_info_to_prot(dir, attrs); |
1934 | 1934 | ||
1935 | ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); | 1935 | ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); |
1936 | if (ret < 0) | 1936 | if (ret < 0) |
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, | |||
2036 | if (dma_addr == DMA_ERROR_CODE) | 2036 | if (dma_addr == DMA_ERROR_CODE) |
2037 | return dma_addr; | 2037 | return dma_addr; |
2038 | 2038 | ||
2039 | prot = __dma_direction_to_prot(dir) | IOMMU_MMIO; | 2039 | prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; |
2040 | 2040 | ||
2041 | ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); | 2041 | ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); |
2042 | if (ret < 0) | 2042 | if (ret < 0) |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index e04082700bb1..4a14b25163fb 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, | |||
558 | unsigned long attrs) | 558 | unsigned long attrs) |
559 | { | 559 | { |
560 | bool coherent = is_device_dma_coherent(dev); | 560 | bool coherent = is_device_dma_coherent(dev); |
561 | int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); | 561 | int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); |
562 | size_t iosize = size; | 562 | size_t iosize = size; |
563 | void *addr; | 563 | void *addr; |
564 | 564 | ||
@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, | |||
712 | unsigned long attrs) | 712 | unsigned long attrs) |
713 | { | 713 | { |
714 | bool coherent = is_device_dma_coherent(dev); | 714 | bool coherent = is_device_dma_coherent(dev); |
715 | int prot = dma_direction_to_prot(dir, coherent); | 715 | int prot = dma_info_to_prot(dir, coherent, attrs); |
716 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); | 716 | dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); |
717 | 717 | ||
718 | if (!iommu_dma_mapping_error(dev, dev_addr) && | 718 | if (!iommu_dma_mapping_error(dev, dev_addr) && |
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
770 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); | 770 | __iommu_sync_sg_for_device(dev, sgl, nelems, dir); |
771 | 771 | ||
772 | return iommu_dma_map_sg(dev, sgl, nelems, | 772 | return iommu_dma_map_sg(dev, sgl, nelems, |
773 | dma_direction_to_prot(dir, coherent)); | 773 | dma_info_to_prot(dir, coherent, attrs)); |
774 | } | 774 | } |
775 | 775 | ||
776 | static void __iommu_unmap_sg_attrs(struct device *dev, | 776 | static void __iommu_unmap_sg_attrs(struct device *dev, |
@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = { | |||
799 | .sync_sg_for_device = __iommu_sync_sg_for_device, | 799 | .sync_sg_for_device = __iommu_sync_sg_for_device, |
800 | .map_resource = iommu_dma_map_resource, | 800 | .map_resource = iommu_dma_map_resource, |
801 | .unmap_resource = iommu_dma_unmap_resource, | 801 | .unmap_resource = iommu_dma_unmap_resource, |
802 | .dma_supported = iommu_dma_supported, | ||
803 | .mapping_error = iommu_dma_mapping_error, | 802 | .mapping_error = iommu_dma_mapping_error, |
804 | }; | 803 | }; |
805 | 804 | ||
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e0d2e6e6e40c..3752521c62ab 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, | |||
536 | if (!iort_fwnode) | 536 | if (!iort_fwnode) |
537 | return NULL; | 537 | return NULL; |
538 | 538 | ||
539 | ops = iommu_get_instance(iort_fwnode); | 539 | ops = iommu_ops_from_fwnode(iort_fwnode); |
540 | if (!ops) | 540 | if (!ops) |
541 | return NULL; | 541 | return NULL; |
542 | 542 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7539f73df9e0..f37f4978dabb 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1859,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) | |||
1859 | * Alloc MicroCode buffer for 'chans' Channel threads. | 1859 | * Alloc MicroCode buffer for 'chans' Channel threads. |
1860 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | 1860 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) |
1861 | */ | 1861 | */ |
1862 | pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, | 1862 | pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev, |
1863 | chans * pl330->mcbufsz, | 1863 | chans * pl330->mcbufsz, |
1864 | &pl330->mcode_bus, GFP_KERNEL); | 1864 | &pl330->mcode_bus, GFP_KERNEL, |
1865 | DMA_ATTR_PRIVILEGED); | ||
1865 | if (!pl330->mcode_cpu) { | 1866 | if (!pl330->mcode_cpu) { |
1866 | dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", | 1867 | dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", |
1867 | __func__, __LINE__); | 1868 | __func__, __LINE__); |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 8ee54d71c7eb..37e204f3d9be 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1 | |||
352 | select IOMMU_API | 352 | select IOMMU_API |
353 | select MEMORY | 353 | select MEMORY |
354 | select MTK_SMI | 354 | select MTK_SMI |
355 | select COMMON_CLK_MT2701_MMSYS | ||
356 | select COMMON_CLK_MT2701_IMGSYS | ||
357 | select COMMON_CLK_MT2701_VDECSYS | ||
358 | help | 355 | help |
359 | Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is | 356 | Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is |
360 | Multimedia Memory Managememt Unit. This option enables remapping of | 357 | Multimedia Memory Managememt Unit. This option enables remapping of |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 3ef0f42984f2..1b5b8c5361c5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -112,7 +112,7 @@ static struct timer_list queue_timer; | |||
112 | * Domain for untranslated devices - only allocated | 112 | * Domain for untranslated devices - only allocated |
113 | * if iommu=pt passed on kernel cmd line. | 113 | * if iommu=pt passed on kernel cmd line. |
114 | */ | 114 | */ |
115 | static const struct iommu_ops amd_iommu_ops; | 115 | const struct iommu_ops amd_iommu_ops; |
116 | 116 | ||
117 | static ATOMIC_NOTIFIER_HEAD(ppr_notifier); | 117 | static ATOMIC_NOTIFIER_HEAD(ppr_notifier); |
118 | int amd_iommu_max_glx_val = -1; | 118 | int amd_iommu_max_glx_val = -1; |
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev) | |||
445 | static int iommu_init_device(struct device *dev) | 445 | static int iommu_init_device(struct device *dev) |
446 | { | 446 | { |
447 | struct iommu_dev_data *dev_data; | 447 | struct iommu_dev_data *dev_data; |
448 | struct amd_iommu *iommu; | ||
448 | int devid; | 449 | int devid; |
449 | 450 | ||
450 | if (dev->archdata.iommu) | 451 | if (dev->archdata.iommu) |
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev) | |||
454 | if (devid < 0) | 455 | if (devid < 0) |
455 | return devid; | 456 | return devid; |
456 | 457 | ||
458 | iommu = amd_iommu_rlookup_table[devid]; | ||
459 | |||
457 | dev_data = find_dev_data(devid); | 460 | dev_data = find_dev_data(devid); |
458 | if (!dev_data) | 461 | if (!dev_data) |
459 | return -ENOMEM; | 462 | return -ENOMEM; |
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev) | |||
469 | 472 | ||
470 | dev->archdata.iommu = dev_data; | 473 | dev->archdata.iommu = dev_data; |
471 | 474 | ||
472 | iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, | 475 | iommu_device_link(&iommu->iommu, dev); |
473 | dev); | ||
474 | 476 | ||
475 | return 0; | 477 | return 0; |
476 | } | 478 | } |
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev) | |||
495 | 497 | ||
496 | static void iommu_uninit_device(struct device *dev) | 498 | static void iommu_uninit_device(struct device *dev) |
497 | { | 499 | { |
498 | int devid; | ||
499 | struct iommu_dev_data *dev_data; | 500 | struct iommu_dev_data *dev_data; |
501 | struct amd_iommu *iommu; | ||
502 | int devid; | ||
500 | 503 | ||
501 | devid = get_device_id(dev); | 504 | devid = get_device_id(dev); |
502 | if (devid < 0) | 505 | if (devid < 0) |
503 | return; | 506 | return; |
504 | 507 | ||
508 | iommu = amd_iommu_rlookup_table[devid]; | ||
509 | |||
505 | dev_data = search_dev_data(devid); | 510 | dev_data = search_dev_data(devid); |
506 | if (!dev_data) | 511 | if (!dev_data) |
507 | return; | 512 | return; |
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev) | |||
509 | if (dev_data->domain) | 514 | if (dev_data->domain) |
510 | detach_device(dev); | 515 | detach_device(dev); |
511 | 516 | ||
512 | iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, | 517 | iommu_device_unlink(&iommu->iommu, dev); |
513 | dev); | ||
514 | 518 | ||
515 | iommu_group_remove_device(dev); | 519 | iommu_group_remove_device(dev); |
516 | 520 | ||
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap) | |||
3161 | return false; | 3165 | return false; |
3162 | } | 3166 | } |
3163 | 3167 | ||
3164 | static void amd_iommu_get_dm_regions(struct device *dev, | 3168 | static void amd_iommu_get_resv_regions(struct device *dev, |
3165 | struct list_head *head) | 3169 | struct list_head *head) |
3166 | { | 3170 | { |
3171 | struct iommu_resv_region *region; | ||
3167 | struct unity_map_entry *entry; | 3172 | struct unity_map_entry *entry; |
3168 | int devid; | 3173 | int devid; |
3169 | 3174 | ||
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev, | |||
3172 | return; | 3177 | return; |
3173 | 3178 | ||
3174 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | 3179 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { |
3175 | struct iommu_dm_region *region; | 3180 | size_t length; |
3181 | int prot = 0; | ||
3176 | 3182 | ||
3177 | if (devid < entry->devid_start || devid > entry->devid_end) | 3183 | if (devid < entry->devid_start || devid > entry->devid_end) |
3178 | continue; | 3184 | continue; |
3179 | 3185 | ||
3180 | region = kzalloc(sizeof(*region), GFP_KERNEL); | 3186 | length = entry->address_end - entry->address_start; |
3187 | if (entry->prot & IOMMU_PROT_IR) | ||
3188 | prot |= IOMMU_READ; | ||
3189 | if (entry->prot & IOMMU_PROT_IW) | ||
3190 | prot |= IOMMU_WRITE; | ||
3191 | |||
3192 | region = iommu_alloc_resv_region(entry->address_start, | ||
3193 | length, prot, | ||
3194 | IOMMU_RESV_DIRECT); | ||
3181 | if (!region) { | 3195 | if (!region) { |
3182 | pr_err("Out of memory allocating dm-regions for %s\n", | 3196 | pr_err("Out of memory allocating dm-regions for %s\n", |
3183 | dev_name(dev)); | 3197 | dev_name(dev)); |
3184 | return; | 3198 | return; |
3185 | } | 3199 | } |
3186 | |||
3187 | region->start = entry->address_start; | ||
3188 | region->length = entry->address_end - entry->address_start; | ||
3189 | if (entry->prot & IOMMU_PROT_IR) | ||
3190 | region->prot |= IOMMU_READ; | ||
3191 | if (entry->prot & IOMMU_PROT_IW) | ||
3192 | region->prot |= IOMMU_WRITE; | ||
3193 | |||
3194 | list_add_tail(®ion->list, head); | 3200 | list_add_tail(®ion->list, head); |
3195 | } | 3201 | } |
3202 | |||
3203 | region = iommu_alloc_resv_region(MSI_RANGE_START, | ||
3204 | MSI_RANGE_END - MSI_RANGE_START + 1, | ||
3205 | 0, IOMMU_RESV_RESERVED); | ||
3206 | if (!region) | ||
3207 | return; | ||
3208 | list_add_tail(®ion->list, head); | ||
3209 | |||
3210 | region = iommu_alloc_resv_region(HT_RANGE_START, | ||
3211 | HT_RANGE_END - HT_RANGE_START + 1, | ||
3212 | 0, IOMMU_RESV_RESERVED); | ||
3213 | if (!region) | ||
3214 | return; | ||
3215 | list_add_tail(®ion->list, head); | ||
3196 | } | 3216 | } |
3197 | 3217 | ||
3198 | static void amd_iommu_put_dm_regions(struct device *dev, | 3218 | static void amd_iommu_put_resv_regions(struct device *dev, |
3199 | struct list_head *head) | 3219 | struct list_head *head) |
3200 | { | 3220 | { |
3201 | struct iommu_dm_region *entry, *next; | 3221 | struct iommu_resv_region *entry, *next; |
3202 | 3222 | ||
3203 | list_for_each_entry_safe(entry, next, head, list) | 3223 | list_for_each_entry_safe(entry, next, head, list) |
3204 | kfree(entry); | 3224 | kfree(entry); |
3205 | } | 3225 | } |
3206 | 3226 | ||
3207 | static void amd_iommu_apply_dm_region(struct device *dev, | 3227 | static void amd_iommu_apply_resv_region(struct device *dev, |
3208 | struct iommu_domain *domain, | 3228 | struct iommu_domain *domain, |
3209 | struct iommu_dm_region *region) | 3229 | struct iommu_resv_region *region) |
3210 | { | 3230 | { |
3211 | struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); | 3231 | struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); |
3212 | unsigned long start, end; | 3232 | unsigned long start, end; |
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev, | |||
3217 | WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); | 3237 | WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); |
3218 | } | 3238 | } |
3219 | 3239 | ||
3220 | static const struct iommu_ops amd_iommu_ops = { | 3240 | const struct iommu_ops amd_iommu_ops = { |
3221 | .capable = amd_iommu_capable, | 3241 | .capable = amd_iommu_capable, |
3222 | .domain_alloc = amd_iommu_domain_alloc, | 3242 | .domain_alloc = amd_iommu_domain_alloc, |
3223 | .domain_free = amd_iommu_domain_free, | 3243 | .domain_free = amd_iommu_domain_free, |
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = { | |||
3230 | .add_device = amd_iommu_add_device, | 3250 | .add_device = amd_iommu_add_device, |
3231 | .remove_device = amd_iommu_remove_device, | 3251 | .remove_device = amd_iommu_remove_device, |
3232 | .device_group = amd_iommu_device_group, | 3252 | .device_group = amd_iommu_device_group, |
3233 | .get_dm_regions = amd_iommu_get_dm_regions, | 3253 | .get_resv_regions = amd_iommu_get_resv_regions, |
3234 | .put_dm_regions = amd_iommu_put_dm_regions, | 3254 | .put_resv_regions = amd_iommu_put_resv_regions, |
3235 | .apply_dm_region = amd_iommu_apply_dm_region, | 3255 | .apply_resv_region = amd_iommu_apply_resv_region, |
3236 | .pgsize_bitmap = AMD_IOMMU_PGSIZES, | 3256 | .pgsize_bitmap = AMD_IOMMU_PGSIZES, |
3237 | }; | 3257 | }; |
3238 | 3258 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 6799cf9713f7..04cdac7ab3e3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -94,6 +94,8 @@ | |||
94 | * out of it. | 94 | * out of it. |
95 | */ | 95 | */ |
96 | 96 | ||
97 | extern const struct iommu_ops amd_iommu_ops; | ||
98 | |||
97 | /* | 99 | /* |
98 | * structure describing one IOMMU in the ACPI table. Typically followed by one | 100 | * structure describing one IOMMU in the ACPI table. Typically followed by one |
99 | * or more ivhd_entrys. | 101 | * or more ivhd_entrys. |
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu) | |||
1635 | amd_iommu_erratum_746_workaround(iommu); | 1637 | amd_iommu_erratum_746_workaround(iommu); |
1636 | amd_iommu_ats_write_check_workaround(iommu); | 1638 | amd_iommu_ats_write_check_workaround(iommu); |
1637 | 1639 | ||
1638 | iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, | 1640 | iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, |
1639 | amd_iommu_groups, "ivhd%d", | 1641 | amd_iommu_groups, "ivhd%d", iommu->index); |
1640 | iommu->index); | 1642 | iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); |
1643 | iommu_device_register(&iommu->iommu); | ||
1641 | 1644 | ||
1642 | return pci_enable_device(iommu->dev); | 1645 | return pci_enable_device(iommu->dev); |
1643 | } | 1646 | } |
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void) | |||
2230 | */ | 2233 | */ |
2231 | ret = check_ivrs_checksum(ivrs_base); | 2234 | ret = check_ivrs_checksum(ivrs_base); |
2232 | if (ret) | 2235 | if (ret) |
2233 | return ret; | 2236 | goto out; |
2234 | 2237 | ||
2235 | amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); | 2238 | amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); |
2236 | DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); | 2239 | DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 0d91785ebdc3..af00f381a7b1 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
@@ -535,8 +535,8 @@ struct amd_iommu { | |||
535 | /* if one, we need to send a completion wait command */ | 535 | /* if one, we need to send a completion wait command */ |
536 | bool need_sync; | 536 | bool need_sync; |
537 | 537 | ||
538 | /* IOMMU sysfs device */ | 538 | /* Handle for IOMMU core code */ |
539 | struct device *iommu_dev; | 539 | struct iommu_device iommu; |
540 | 540 | ||
541 | /* | 541 | /* |
542 | * We can't rely on the BIOS to restore all values on reinit, so we | 542 | * We can't rely on the BIOS to restore all values on reinit, so we |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4d6ec444a9d6..5806a6acc94e 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -269,9 +269,6 @@ | |||
269 | #define STRTAB_STE_1_SHCFG_INCOMING 1UL | 269 | #define STRTAB_STE_1_SHCFG_INCOMING 1UL |
270 | #define STRTAB_STE_1_SHCFG_SHIFT 44 | 270 | #define STRTAB_STE_1_SHCFG_SHIFT 44 |
271 | 271 | ||
272 | #define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL | ||
273 | #define STRTAB_STE_1_PRIVCFG_SHIFT 48 | ||
274 | |||
275 | #define STRTAB_STE_2_S2VMID_SHIFT 0 | 272 | #define STRTAB_STE_2_S2VMID_SHIFT 0 |
276 | #define STRTAB_STE_2_S2VMID_MASK 0xffffUL | 273 | #define STRTAB_STE_2_S2VMID_MASK 0xffffUL |
277 | #define STRTAB_STE_2_VTCR_SHIFT 32 | 274 | #define STRTAB_STE_2_VTCR_SHIFT 32 |
@@ -412,6 +409,9 @@ | |||
412 | /* High-level queue structures */ | 409 | /* High-level queue structures */ |
413 | #define ARM_SMMU_POLL_TIMEOUT_US 100 | 410 | #define ARM_SMMU_POLL_TIMEOUT_US 100 |
414 | 411 | ||
412 | #define MSI_IOVA_BASE 0x8000000 | ||
413 | #define MSI_IOVA_LENGTH 0x100000 | ||
414 | |||
415 | static bool disable_bypass; | 415 | static bool disable_bypass; |
416 | module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); | 416 | module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); |
417 | MODULE_PARM_DESC(disable_bypass, | 417 | MODULE_PARM_DESC(disable_bypass, |
@@ -616,6 +616,9 @@ struct arm_smmu_device { | |||
616 | unsigned int sid_bits; | 616 | unsigned int sid_bits; |
617 | 617 | ||
618 | struct arm_smmu_strtab_cfg strtab_cfg; | 618 | struct arm_smmu_strtab_cfg strtab_cfg; |
619 | |||
620 | /* IOMMU core code handle */ | ||
621 | struct iommu_device iommu; | ||
619 | }; | 622 | }; |
620 | 623 | ||
621 | /* SMMU private data for each master */ | 624 | /* SMMU private data for each master */ |
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1042 | } | 1045 | } |
1043 | } | 1046 | } |
1044 | 1047 | ||
1045 | /* Nuke the existing Config, as we're going to rewrite it */ | 1048 | /* Nuke the existing STE_0 value, as we're going to rewrite it */ |
1046 | val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT); | 1049 | val = ste->valid ? STRTAB_STE_0_V : 0; |
1047 | |||
1048 | if (ste->valid) | ||
1049 | val |= STRTAB_STE_0_V; | ||
1050 | else | ||
1051 | val &= ~STRTAB_STE_0_V; | ||
1052 | 1050 | ||
1053 | if (ste->bypass) { | 1051 | if (ste->bypass) { |
1054 | val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT | 1052 | val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT |
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1073 | #ifdef CONFIG_PCI_ATS | 1071 | #ifdef CONFIG_PCI_ATS |
1074 | STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | | 1072 | STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | |
1075 | #endif | 1073 | #endif |
1076 | STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT | | 1074 | STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT); |
1077 | STRTAB_STE_1_PRIVCFG_UNPRIV << | ||
1078 | STRTAB_STE_1_PRIVCFG_SHIFT); | ||
1079 | 1075 | ||
1080 | if (smmu->features & ARM_SMMU_FEAT_STALLS) | 1076 | if (smmu->features & ARM_SMMU_FEAT_STALLS) |
1081 | dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); | 1077 | dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); |
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1083 | val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK | 1079 | val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK |
1084 | << STRTAB_STE_0_S1CTXPTR_SHIFT) | | 1080 | << STRTAB_STE_0_S1CTXPTR_SHIFT) | |
1085 | STRTAB_STE_0_CFG_S1_TRANS; | 1081 | STRTAB_STE_0_CFG_S1_TRANS; |
1086 | |||
1087 | } | 1082 | } |
1088 | 1083 | ||
1089 | if (ste->s2_cfg) { | 1084 | if (ste->s2_cfg) { |
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap) | |||
1372 | switch (cap) { | 1367 | switch (cap) { |
1373 | case IOMMU_CAP_CACHE_COHERENCY: | 1368 | case IOMMU_CAP_CACHE_COHERENCY: |
1374 | return true; | 1369 | return true; |
1375 | case IOMMU_CAP_INTR_REMAP: | ||
1376 | return true; /* MSIs are just memory writes */ | ||
1377 | case IOMMU_CAP_NOEXEC: | 1370 | case IOMMU_CAP_NOEXEC: |
1378 | return true; | 1371 | return true; |
1379 | default: | 1372 | default: |
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev) | |||
1795 | } | 1788 | } |
1796 | 1789 | ||
1797 | group = iommu_group_get_for_dev(dev); | 1790 | group = iommu_group_get_for_dev(dev); |
1798 | if (!IS_ERR(group)) | 1791 | if (!IS_ERR(group)) { |
1799 | iommu_group_put(group); | 1792 | iommu_group_put(group); |
1793 | iommu_device_link(&smmu->iommu, dev); | ||
1794 | } | ||
1800 | 1795 | ||
1801 | return PTR_ERR_OR_ZERO(group); | 1796 | return PTR_ERR_OR_ZERO(group); |
1802 | } | 1797 | } |
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev) | |||
1805 | { | 1800 | { |
1806 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | 1801 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1807 | struct arm_smmu_master_data *master; | 1802 | struct arm_smmu_master_data *master; |
1803 | struct arm_smmu_device *smmu; | ||
1808 | 1804 | ||
1809 | if (!fwspec || fwspec->ops != &arm_smmu_ops) | 1805 | if (!fwspec || fwspec->ops != &arm_smmu_ops) |
1810 | return; | 1806 | return; |
1811 | 1807 | ||
1812 | master = fwspec->iommu_priv; | 1808 | master = fwspec->iommu_priv; |
1809 | smmu = master->smmu; | ||
1813 | if (master && master->ste.valid) | 1810 | if (master && master->ste.valid) |
1814 | arm_smmu_detach_dev(dev); | 1811 | arm_smmu_detach_dev(dev); |
1815 | iommu_group_remove_device(dev); | 1812 | iommu_group_remove_device(dev); |
1813 | iommu_device_unlink(&smmu->iommu, dev); | ||
1816 | kfree(master); | 1814 | kfree(master); |
1817 | iommu_fwspec_free(dev); | 1815 | iommu_fwspec_free(dev); |
1818 | } | 1816 | } |
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) | |||
1883 | return iommu_fwspec_add_ids(dev, args->args, 1); | 1881 | return iommu_fwspec_add_ids(dev, args->args, 1); |
1884 | } | 1882 | } |
1885 | 1883 | ||
1884 | static void arm_smmu_get_resv_regions(struct device *dev, | ||
1885 | struct list_head *head) | ||
1886 | { | ||
1887 | struct iommu_resv_region *region; | ||
1888 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | ||
1889 | |||
1890 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, | ||
1891 | prot, IOMMU_RESV_MSI); | ||
1892 | if (!region) | ||
1893 | return; | ||
1894 | |||
1895 | list_add_tail(®ion->list, head); | ||
1896 | } | ||
1897 | |||
1898 | static void arm_smmu_put_resv_regions(struct device *dev, | ||
1899 | struct list_head *head) | ||
1900 | { | ||
1901 | struct iommu_resv_region *entry, *next; | ||
1902 | |||
1903 | list_for_each_entry_safe(entry, next, head, list) | ||
1904 | kfree(entry); | ||
1905 | } | ||
1906 | |||
1886 | static struct iommu_ops arm_smmu_ops = { | 1907 | static struct iommu_ops arm_smmu_ops = { |
1887 | .capable = arm_smmu_capable, | 1908 | .capable = arm_smmu_capable, |
1888 | .domain_alloc = arm_smmu_domain_alloc, | 1909 | .domain_alloc = arm_smmu_domain_alloc, |
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = { | |||
1898 | .domain_get_attr = arm_smmu_domain_get_attr, | 1919 | .domain_get_attr = arm_smmu_domain_get_attr, |
1899 | .domain_set_attr = arm_smmu_domain_set_attr, | 1920 | .domain_set_attr = arm_smmu_domain_set_attr, |
1900 | .of_xlate = arm_smmu_of_xlate, | 1921 | .of_xlate = arm_smmu_of_xlate, |
1922 | .get_resv_regions = arm_smmu_get_resv_regions, | ||
1923 | .put_resv_regions = arm_smmu_put_resv_regions, | ||
1901 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ | 1924 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
1902 | }; | 1925 | }; |
1903 | 1926 | ||
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) | |||
1983 | u32 size, l1size; | 2006 | u32 size, l1size; |
1984 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; | 2007 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
1985 | 2008 | ||
1986 | /* | 2009 | /* Calculate the L1 size, capped to the SIDSIZE. */ |
1987 | * If we can resolve everything with a single L2 table, then we | 2010 | size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); |
1988 | * just need a single L1 descriptor. Otherwise, calculate the L1 | 2011 | size = min(size, smmu->sid_bits - STRTAB_SPLIT); |
1989 | * size, capped to the SIDSIZE. | ||
1990 | */ | ||
1991 | if (smmu->sid_bits < STRTAB_SPLIT) { | ||
1992 | size = 0; | ||
1993 | } else { | ||
1994 | size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); | ||
1995 | size = min(size, smmu->sid_bits - STRTAB_SPLIT); | ||
1996 | } | ||
1997 | cfg->num_l1_ents = 1 << size; | 2012 | cfg->num_l1_ents = 1 << size; |
1998 | 2013 | ||
1999 | size += STRTAB_SPLIT; | 2014 | size += STRTAB_SPLIT; |
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) | |||
2504 | smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK; | 2519 | smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK; |
2505 | smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK; | 2520 | smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK; |
2506 | 2521 | ||
2522 | /* | ||
2523 | * If the SMMU supports fewer bits than would fill a single L2 stream | ||
2524 | * table, use a linear table instead. | ||
2525 | */ | ||
2526 | if (smmu->sid_bits <= STRTAB_SPLIT) | ||
2527 | smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; | ||
2528 | |||
2507 | /* IDR5 */ | 2529 | /* IDR5 */ |
2508 | reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); | 2530 | reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); |
2509 | 2531 | ||
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2613 | { | 2635 | { |
2614 | int irq, ret; | 2636 | int irq, ret; |
2615 | struct resource *res; | 2637 | struct resource *res; |
2638 | resource_size_t ioaddr; | ||
2616 | struct arm_smmu_device *smmu; | 2639 | struct arm_smmu_device *smmu; |
2617 | struct device *dev = &pdev->dev; | 2640 | struct device *dev = &pdev->dev; |
2618 | bool bypass; | 2641 | bool bypass; |
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2630 | dev_err(dev, "MMIO region too small (%pr)\n", res); | 2653 | dev_err(dev, "MMIO region too small (%pr)\n", res); |
2631 | return -EINVAL; | 2654 | return -EINVAL; |
2632 | } | 2655 | } |
2656 | ioaddr = res->start; | ||
2633 | 2657 | ||
2634 | smmu->base = devm_ioremap_resource(dev, res); | 2658 | smmu->base = devm_ioremap_resource(dev, res); |
2635 | if (IS_ERR(smmu->base)) | 2659 | if (IS_ERR(smmu->base)) |
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2682 | return ret; | 2706 | return ret; |
2683 | 2707 | ||
2684 | /* And we're up. Go go go! */ | 2708 | /* And we're up. Go go go! */ |
2685 | iommu_register_instance(dev->fwnode, &arm_smmu_ops); | 2709 | ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, |
2710 | "smmu3.%pa", &ioaddr); | ||
2711 | if (ret) | ||
2712 | return ret; | ||
2713 | |||
2714 | iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); | ||
2715 | iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); | ||
2716 | |||
2717 | ret = iommu_device_register(&smmu->iommu); | ||
2686 | 2718 | ||
2687 | #ifdef CONFIG_PCI | 2719 | #ifdef CONFIG_PCI |
2688 | if (pci_bus_type.iommu_ops != &arm_smmu_ops) { | 2720 | if (pci_bus_type.iommu_ops != &arm_smmu_ops) { |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a60cded8a6ed..abf6496843a6 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -24,6 +24,7 @@ | |||
24 | * - v7/v8 long-descriptor format | 24 | * - v7/v8 long-descriptor format |
25 | * - Non-secure access to the SMMU | 25 | * - Non-secure access to the SMMU |
26 | * - Context fault reporting | 26 | * - Context fault reporting |
27 | * - Extended Stream ID (16 bit) | ||
27 | */ | 28 | */ |
28 | 29 | ||
29 | #define pr_fmt(fmt) "arm-smmu: " fmt | 30 | #define pr_fmt(fmt) "arm-smmu: " fmt |
@@ -87,6 +88,7 @@ | |||
87 | #define sCR0_CLIENTPD (1 << 0) | 88 | #define sCR0_CLIENTPD (1 << 0) |
88 | #define sCR0_GFRE (1 << 1) | 89 | #define sCR0_GFRE (1 << 1) |
89 | #define sCR0_GFIE (1 << 2) | 90 | #define sCR0_GFIE (1 << 2) |
91 | #define sCR0_EXIDENABLE (1 << 3) | ||
90 | #define sCR0_GCFGFRE (1 << 4) | 92 | #define sCR0_GCFGFRE (1 << 4) |
91 | #define sCR0_GCFGFIE (1 << 5) | 93 | #define sCR0_GCFGFIE (1 << 5) |
92 | #define sCR0_USFCFG (1 << 10) | 94 | #define sCR0_USFCFG (1 << 10) |
@@ -126,6 +128,7 @@ | |||
126 | #define ID0_NUMIRPT_MASK 0xff | 128 | #define ID0_NUMIRPT_MASK 0xff |
127 | #define ID0_NUMSIDB_SHIFT 9 | 129 | #define ID0_NUMSIDB_SHIFT 9 |
128 | #define ID0_NUMSIDB_MASK 0xf | 130 | #define ID0_NUMSIDB_MASK 0xf |
131 | #define ID0_EXIDS (1 << 8) | ||
129 | #define ID0_NUMSMRG_SHIFT 0 | 132 | #define ID0_NUMSMRG_SHIFT 0 |
130 | #define ID0_NUMSMRG_MASK 0xff | 133 | #define ID0_NUMSMRG_MASK 0xff |
131 | 134 | ||
@@ -169,6 +172,7 @@ | |||
169 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) | 172 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) |
170 | #define S2CR_CBNDX_SHIFT 0 | 173 | #define S2CR_CBNDX_SHIFT 0 |
171 | #define S2CR_CBNDX_MASK 0xff | 174 | #define S2CR_CBNDX_MASK 0xff |
175 | #define S2CR_EXIDVALID (1 << 10) | ||
172 | #define S2CR_TYPE_SHIFT 16 | 176 | #define S2CR_TYPE_SHIFT 16 |
173 | #define S2CR_TYPE_MASK 0x3 | 177 | #define S2CR_TYPE_MASK 0x3 |
174 | enum arm_smmu_s2cr_type { | 178 | enum arm_smmu_s2cr_type { |
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg { | |||
260 | 264 | ||
261 | #define TTBCR2_SEP_SHIFT 15 | 265 | #define TTBCR2_SEP_SHIFT 15 |
262 | #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) | 266 | #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) |
267 | #define TTBCR2_AS (1 << 4) | ||
263 | 268 | ||
264 | #define TTBRn_ASID_SHIFT 48 | 269 | #define TTBRn_ASID_SHIFT 48 |
265 | 270 | ||
@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg { | |||
281 | 286 | ||
282 | #define FSYNR0_WNR (1 << 4) | 287 | #define FSYNR0_WNR (1 << 4) |
283 | 288 | ||
289 | #define MSI_IOVA_BASE 0x8000000 | ||
290 | #define MSI_IOVA_LENGTH 0x100000 | ||
291 | |||
284 | static int force_stage; | 292 | static int force_stage; |
285 | module_param(force_stage, int, S_IRUGO); | 293 | module_param(force_stage, int, S_IRUGO); |
286 | MODULE_PARM_DESC(force_stage, | 294 | MODULE_PARM_DESC(force_stage, |
@@ -351,6 +359,7 @@ struct arm_smmu_device { | |||
351 | #define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) | 359 | #define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) |
352 | #define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) | 360 | #define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) |
353 | #define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) | 361 | #define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) |
362 | #define ARM_SMMU_FEAT_EXIDS (1 << 12) | ||
354 | u32 features; | 363 | u32 features; |
355 | 364 | ||
356 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) | 365 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) |
@@ -380,6 +389,9 @@ struct arm_smmu_device { | |||
380 | unsigned int *irqs; | 389 | unsigned int *irqs; |
381 | 390 | ||
382 | u32 cavium_id_base; /* Specific to Cavium */ | 391 | u32 cavium_id_base; /* Specific to Cavium */ |
392 | |||
393 | /* IOMMU core code handle */ | ||
394 | struct iommu_device iommu; | ||
383 | }; | 395 | }; |
384 | 396 | ||
385 | enum arm_smmu_context_fmt { | 397 | enum arm_smmu_context_fmt { |
@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
778 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; | 790 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; |
779 | reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; | 791 | reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; |
780 | reg2 |= TTBCR2_SEP_UPSTREAM; | 792 | reg2 |= TTBCR2_SEP_UPSTREAM; |
793 | if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) | ||
794 | reg2 |= TTBCR2_AS; | ||
781 | } | 795 | } |
782 | if (smmu->version > ARM_SMMU_V1) | 796 | if (smmu->version > ARM_SMMU_V1) |
783 | writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); | 797 | writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); |
@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) | |||
1048 | struct arm_smmu_smr *smr = smmu->smrs + idx; | 1062 | struct arm_smmu_smr *smr = smmu->smrs + idx; |
1049 | u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; | 1063 | u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; |
1050 | 1064 | ||
1051 | if (smr->valid) | 1065 | if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) |
1052 | reg |= SMR_VALID; | 1066 | reg |= SMR_VALID; |
1053 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); | 1067 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); |
1054 | } | 1068 | } |
@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) | |||
1060 | (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | | 1074 | (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | |
1061 | (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; | 1075 | (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; |
1062 | 1076 | ||
1077 | if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && | ||
1078 | smmu->smrs[idx].valid) | ||
1079 | reg |= S2CR_EXIDVALID; | ||
1063 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); | 1080 | writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); |
1064 | } | 1081 | } |
1065 | 1082 | ||
@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) | |||
1070 | arm_smmu_write_smr(smmu, idx); | 1087 | arm_smmu_write_smr(smmu, idx); |
1071 | } | 1088 | } |
1072 | 1089 | ||
1090 | /* | ||
1091 | * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function | ||
1092 | * should be called after sCR0 is written. | ||
1093 | */ | ||
1094 | static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) | ||
1095 | { | ||
1096 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1097 | u32 smr; | ||
1098 | |||
1099 | if (!smmu->smrs) | ||
1100 | return; | ||
1101 | |||
1102 | /* | ||
1103 | * SMR.ID bits may not be preserved if the corresponding MASK | ||
1104 | * bits are set, so check each one separately. We can reject | ||
1105 | * masters later if they try to claim IDs outside these masks. | ||
1106 | */ | ||
1107 | smr = smmu->streamid_mask << SMR_ID_SHIFT; | ||
1108 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1109 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1110 | smmu->streamid_mask = smr >> SMR_ID_SHIFT; | ||
1111 | |||
1112 | smr = smmu->streamid_mask << SMR_MASK_SHIFT; | ||
1113 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1114 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1115 | smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; | ||
1116 | } | ||
1117 | |||
1073 | static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) | 1118 | static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) |
1074 | { | 1119 | { |
1075 | struct arm_smmu_smr *smrs = smmu->smrs; | 1120 | struct arm_smmu_smr *smrs = smmu->smrs; |
@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | |||
1214 | continue; | 1259 | continue; |
1215 | 1260 | ||
1216 | s2cr[idx].type = type; | 1261 | s2cr[idx].type = type; |
1217 | s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; | 1262 | s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT; |
1218 | s2cr[idx].cbndx = cbndx; | 1263 | s2cr[idx].cbndx = cbndx; |
1219 | arm_smmu_write_s2cr(smmu, idx); | 1264 | arm_smmu_write_s2cr(smmu, idx); |
1220 | } | 1265 | } |
@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap) | |||
1371 | * requests. | 1416 | * requests. |
1372 | */ | 1417 | */ |
1373 | return true; | 1418 | return true; |
1374 | case IOMMU_CAP_INTR_REMAP: | ||
1375 | return true; /* MSIs are just memory writes */ | ||
1376 | case IOMMU_CAP_NOEXEC: | 1419 | case IOMMU_CAP_NOEXEC: |
1377 | return true; | 1420 | return true; |
1378 | default: | 1421 | default: |
@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev) | |||
1444 | if (ret) | 1487 | if (ret) |
1445 | goto out_free; | 1488 | goto out_free; |
1446 | 1489 | ||
1490 | iommu_device_link(&smmu->iommu, dev); | ||
1491 | |||
1447 | return 0; | 1492 | return 0; |
1448 | 1493 | ||
1449 | out_free: | 1494 | out_free: |
@@ -1456,10 +1501,17 @@ out_free: | |||
1456 | static void arm_smmu_remove_device(struct device *dev) | 1501 | static void arm_smmu_remove_device(struct device *dev) |
1457 | { | 1502 | { |
1458 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; | 1503 | struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
1504 | struct arm_smmu_master_cfg *cfg; | ||
1505 | struct arm_smmu_device *smmu; | ||
1506 | |||
1459 | 1507 | ||
1460 | if (!fwspec || fwspec->ops != &arm_smmu_ops) | 1508 | if (!fwspec || fwspec->ops != &arm_smmu_ops) |
1461 | return; | 1509 | return; |
1462 | 1510 | ||
1511 | cfg = fwspec->iommu_priv; | ||
1512 | smmu = cfg->smmu; | ||
1513 | |||
1514 | iommu_device_unlink(&smmu->iommu, dev); | ||
1463 | arm_smmu_master_free_smes(fwspec); | 1515 | arm_smmu_master_free_smes(fwspec); |
1464 | iommu_group_remove_device(dev); | 1516 | iommu_group_remove_device(dev); |
1465 | kfree(fwspec->iommu_priv); | 1517 | kfree(fwspec->iommu_priv); |
@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) | |||
1549 | return iommu_fwspec_add_ids(dev, &fwid, 1); | 1601 | return iommu_fwspec_add_ids(dev, &fwid, 1); |
1550 | } | 1602 | } |
1551 | 1603 | ||
1604 | static void arm_smmu_get_resv_regions(struct device *dev, | ||
1605 | struct list_head *head) | ||
1606 | { | ||
1607 | struct iommu_resv_region *region; | ||
1608 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | ||
1609 | |||
1610 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, | ||
1611 | prot, IOMMU_RESV_MSI); | ||
1612 | if (!region) | ||
1613 | return; | ||
1614 | |||
1615 | list_add_tail(®ion->list, head); | ||
1616 | } | ||
1617 | |||
1618 | static void arm_smmu_put_resv_regions(struct device *dev, | ||
1619 | struct list_head *head) | ||
1620 | { | ||
1621 | struct iommu_resv_region *entry, *next; | ||
1622 | |||
1623 | list_for_each_entry_safe(entry, next, head, list) | ||
1624 | kfree(entry); | ||
1625 | } | ||
1626 | |||
1552 | static struct iommu_ops arm_smmu_ops = { | 1627 | static struct iommu_ops arm_smmu_ops = { |
1553 | .capable = arm_smmu_capable, | 1628 | .capable = arm_smmu_capable, |
1554 | .domain_alloc = arm_smmu_domain_alloc, | 1629 | .domain_alloc = arm_smmu_domain_alloc, |
@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = { | |||
1564 | .domain_get_attr = arm_smmu_domain_get_attr, | 1639 | .domain_get_attr = arm_smmu_domain_get_attr, |
1565 | .domain_set_attr = arm_smmu_domain_set_attr, | 1640 | .domain_set_attr = arm_smmu_domain_set_attr, |
1566 | .of_xlate = arm_smmu_of_xlate, | 1641 | .of_xlate = arm_smmu_of_xlate, |
1642 | .get_resv_regions = arm_smmu_get_resv_regions, | ||
1643 | .put_resv_regions = arm_smmu_put_resv_regions, | ||
1567 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ | 1644 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
1568 | }; | 1645 | }; |
1569 | 1646 | ||
@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1648 | if (smmu->features & ARM_SMMU_FEAT_VMID16) | 1725 | if (smmu->features & ARM_SMMU_FEAT_VMID16) |
1649 | reg |= sCR0_VMID16EN; | 1726 | reg |= sCR0_VMID16EN; |
1650 | 1727 | ||
1728 | if (smmu->features & ARM_SMMU_FEAT_EXIDS) | ||
1729 | reg |= sCR0_EXIDENABLE; | ||
1730 | |||
1651 | /* Push the button */ | 1731 | /* Push the button */ |
1652 | __arm_smmu_tlb_sync(smmu); | 1732 | __arm_smmu_tlb_sync(smmu); |
1653 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); | 1733 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1735 | "\t(IDR0.CTTW overridden by FW configuration)\n"); | 1815 | "\t(IDR0.CTTW overridden by FW configuration)\n"); |
1736 | 1816 | ||
1737 | /* Max. number of entries we have for stream matching/indexing */ | 1817 | /* Max. number of entries we have for stream matching/indexing */ |
1738 | size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); | 1818 | if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) { |
1819 | smmu->features |= ARM_SMMU_FEAT_EXIDS; | ||
1820 | size = 1 << 16; | ||
1821 | } else { | ||
1822 | size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); | ||
1823 | } | ||
1739 | smmu->streamid_mask = size - 1; | 1824 | smmu->streamid_mask = size - 1; |
1740 | if (id & ID0_SMS) { | 1825 | if (id & ID0_SMS) { |
1741 | u32 smr; | ||
1742 | |||
1743 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | 1826 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; |
1744 | size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; | 1827 | size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; |
1745 | if (size == 0) { | 1828 | if (size == 0) { |
@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1748 | return -ENODEV; | 1831 | return -ENODEV; |
1749 | } | 1832 | } |
1750 | 1833 | ||
1751 | /* | ||
1752 | * SMR.ID bits may not be preserved if the corresponding MASK | ||
1753 | * bits are set, so check each one separately. We can reject | ||
1754 | * masters later if they try to claim IDs outside these masks. | ||
1755 | */ | ||
1756 | smr = smmu->streamid_mask << SMR_ID_SHIFT; | ||
1757 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1758 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1759 | smmu->streamid_mask = smr >> SMR_ID_SHIFT; | ||
1760 | |||
1761 | smr = smmu->streamid_mask << SMR_MASK_SHIFT; | ||
1762 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1763 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1764 | smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; | ||
1765 | |||
1766 | /* Zero-initialised to mark as invalid */ | 1834 | /* Zero-initialised to mark as invalid */ |
1767 | smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), | 1835 | smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), |
1768 | GFP_KERNEL); | 1836 | GFP_KERNEL); |
@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1770 | return -ENOMEM; | 1838 | return -ENOMEM; |
1771 | 1839 | ||
1772 | dev_notice(smmu->dev, | 1840 | dev_notice(smmu->dev, |
1773 | "\tstream matching with %lu register groups, mask 0x%x", | 1841 | "\tstream matching with %lu register groups", size); |
1774 | size, smmu->smr_mask_mask); | ||
1775 | } | 1842 | } |
1776 | /* s2cr->type == 0 means translation, so initialise explicitly */ | 1843 | /* s2cr->type == 0 means translation, so initialise explicitly */ |
1777 | smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), | 1844 | smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), |
@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, | |||
2011 | static int arm_smmu_device_probe(struct platform_device *pdev) | 2078 | static int arm_smmu_device_probe(struct platform_device *pdev) |
2012 | { | 2079 | { |
2013 | struct resource *res; | 2080 | struct resource *res; |
2081 | resource_size_t ioaddr; | ||
2014 | struct arm_smmu_device *smmu; | 2082 | struct arm_smmu_device *smmu; |
2015 | struct device *dev = &pdev->dev; | 2083 | struct device *dev = &pdev->dev; |
2016 | int num_irqs, i, err; | 2084 | int num_irqs, i, err; |
@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2031 | return err; | 2099 | return err; |
2032 | 2100 | ||
2033 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2101 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2102 | ioaddr = res->start; | ||
2034 | smmu->base = devm_ioremap_resource(dev, res); | 2103 | smmu->base = devm_ioremap_resource(dev, res); |
2035 | if (IS_ERR(smmu->base)) | 2104 | if (IS_ERR(smmu->base)) |
2036 | return PTR_ERR(smmu->base); | 2105 | return PTR_ERR(smmu->base); |
@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev) | |||
2091 | } | 2160 | } |
2092 | } | 2161 | } |
2093 | 2162 | ||
2094 | iommu_register_instance(dev->fwnode, &arm_smmu_ops); | 2163 | err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, |
2164 | "smmu.%pa", &ioaddr); | ||
2165 | if (err) { | ||
2166 | dev_err(dev, "Failed to register iommu in sysfs\n"); | ||
2167 | return err; | ||
2168 | } | ||
2169 | |||
2170 | iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); | ||
2171 | iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); | ||
2172 | |||
2173 | err = iommu_device_register(&smmu->iommu); | ||
2174 | if (err) { | ||
2175 | dev_err(dev, "Failed to register iommu\n"); | ||
2176 | return err; | ||
2177 | } | ||
2178 | |||
2095 | platform_set_drvdata(pdev, smmu); | 2179 | platform_set_drvdata(pdev, smmu); |
2096 | arm_smmu_device_reset(smmu); | 2180 | arm_smmu_device_reset(smmu); |
2181 | arm_smmu_test_smr_masks(smmu); | ||
2097 | 2182 | ||
2098 | /* Oh, for a proper bus abstraction */ | 2183 | /* Oh, for a proper bus abstraction */ |
2099 | if (!iommu_present(&platform_bus_type)) | 2184 | if (!iommu_present(&platform_bus_type)) |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2db0d641cf45..48d36ce59efb 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page { | |||
37 | phys_addr_t phys; | 37 | phys_addr_t phys; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | enum iommu_dma_cookie_type { | ||
41 | IOMMU_DMA_IOVA_COOKIE, | ||
42 | IOMMU_DMA_MSI_COOKIE, | ||
43 | }; | ||
44 | |||
40 | struct iommu_dma_cookie { | 45 | struct iommu_dma_cookie { |
41 | struct iova_domain iovad; | 46 | enum iommu_dma_cookie_type type; |
42 | struct list_head msi_page_list; | 47 | union { |
43 | spinlock_t msi_lock; | 48 | /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ |
49 | struct iova_domain iovad; | ||
50 | /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ | ||
51 | dma_addr_t msi_iova; | ||
52 | }; | ||
53 | struct list_head msi_page_list; | ||
54 | spinlock_t msi_lock; | ||
44 | }; | 55 | }; |
45 | 56 | ||
57 | static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) | ||
58 | { | ||
59 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) | ||
60 | return cookie->iovad.granule; | ||
61 | return PAGE_SIZE; | ||
62 | } | ||
63 | |||
46 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) | 64 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) |
47 | { | 65 | { |
48 | return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; | 66 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
67 | |||
68 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) | ||
69 | return &cookie->iovad; | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) | ||
74 | { | ||
75 | struct iommu_dma_cookie *cookie; | ||
76 | |||
77 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | ||
78 | if (cookie) { | ||
79 | spin_lock_init(&cookie->msi_lock); | ||
80 | INIT_LIST_HEAD(&cookie->msi_page_list); | ||
81 | cookie->type = type; | ||
82 | } | ||
83 | return cookie; | ||
49 | } | 84 | } |
50 | 85 | ||
51 | int iommu_dma_init(void) | 86 | int iommu_dma_init(void) |
@@ -62,25 +97,53 @@ int iommu_dma_init(void) | |||
62 | */ | 97 | */ |
63 | int iommu_get_dma_cookie(struct iommu_domain *domain) | 98 | int iommu_get_dma_cookie(struct iommu_domain *domain) |
64 | { | 99 | { |
100 | if (domain->iova_cookie) | ||
101 | return -EEXIST; | ||
102 | |||
103 | domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); | ||
104 | if (!domain->iova_cookie) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | EXPORT_SYMBOL(iommu_get_dma_cookie); | ||
110 | |||
111 | /** | ||
112 | * iommu_get_msi_cookie - Acquire just MSI remapping resources | ||
113 | * @domain: IOMMU domain to prepare | ||
114 | * @base: Start address of IOVA region for MSI mappings | ||
115 | * | ||
116 | * Users who manage their own IOVA allocation and do not want DMA API support, | ||
117 | * but would still like to take advantage of automatic MSI remapping, can use | ||
118 | * this to initialise their own domain appropriately. Users should reserve a | ||
119 | * contiguous IOVA region, starting at @base, large enough to accommodate the | ||
120 | * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address | ||
121 | * used by the devices attached to @domain. | ||
122 | */ | ||
123 | int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) | ||
124 | { | ||
65 | struct iommu_dma_cookie *cookie; | 125 | struct iommu_dma_cookie *cookie; |
66 | 126 | ||
127 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
128 | return -EINVAL; | ||
129 | |||
67 | if (domain->iova_cookie) | 130 | if (domain->iova_cookie) |
68 | return -EEXIST; | 131 | return -EEXIST; |
69 | 132 | ||
70 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | 133 | cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE); |
71 | if (!cookie) | 134 | if (!cookie) |
72 | return -ENOMEM; | 135 | return -ENOMEM; |
73 | 136 | ||
74 | spin_lock_init(&cookie->msi_lock); | 137 | cookie->msi_iova = base; |
75 | INIT_LIST_HEAD(&cookie->msi_page_list); | ||
76 | domain->iova_cookie = cookie; | 138 | domain->iova_cookie = cookie; |
77 | return 0; | 139 | return 0; |
78 | } | 140 | } |
79 | EXPORT_SYMBOL(iommu_get_dma_cookie); | 141 | EXPORT_SYMBOL(iommu_get_msi_cookie); |
80 | 142 | ||
81 | /** | 143 | /** |
82 | * iommu_put_dma_cookie - Release a domain's DMA mapping resources | 144 | * iommu_put_dma_cookie - Release a domain's DMA mapping resources |
83 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() | 145 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or |
146 | * iommu_get_msi_cookie() | ||
84 | * | 147 | * |
85 | * IOMMU drivers should normally call this from their domain_free callback. | 148 | * IOMMU drivers should normally call this from their domain_free callback. |
86 | */ | 149 | */ |
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
92 | if (!cookie) | 155 | if (!cookie) |
93 | return; | 156 | return; |
94 | 157 | ||
95 | if (cookie->iovad.granule) | 158 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) |
96 | put_iova_domain(&cookie->iovad); | 159 | put_iova_domain(&cookie->iovad); |
97 | 160 | ||
98 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { | 161 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { |
@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev, | |||
137 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | 200 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
138 | u64 size, struct device *dev) | 201 | u64 size, struct device *dev) |
139 | { | 202 | { |
140 | struct iova_domain *iovad = cookie_iovad(domain); | 203 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
204 | struct iova_domain *iovad = &cookie->iovad; | ||
141 | unsigned long order, base_pfn, end_pfn; | 205 | unsigned long order, base_pfn, end_pfn; |
206 | bool pci = dev && dev_is_pci(dev); | ||
142 | 207 | ||
143 | if (!iovad) | 208 | if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) |
144 | return -ENODEV; | 209 | return -EINVAL; |
145 | 210 | ||
146 | /* Use the smallest supported page size for IOVA granularity */ | 211 | /* Use the smallest supported page size for IOVA granularity */ |
147 | order = __ffs(domain->pgsize_bitmap); | 212 | order = __ffs(domain->pgsize_bitmap); |
@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
161 | end_pfn = min_t(unsigned long, end_pfn, | 226 | end_pfn = min_t(unsigned long, end_pfn, |
162 | domain->geometry.aperture_end >> order); | 227 | domain->geometry.aperture_end >> order); |
163 | } | 228 | } |
229 | /* | ||
230 | * PCI devices may have larger DMA masks, but still prefer allocating | ||
231 | * within a 32-bit mask to avoid DAC addressing. Such limitations don't | ||
232 | * apply to the typical platform device, so for those we may as well | ||
233 | * leave the cache limit at the top of their range to save an rb_last() | ||
234 | * traversal on every allocation. | ||
235 | */ | ||
236 | if (pci) | ||
237 | end_pfn &= DMA_BIT_MASK(32) >> order; | ||
164 | 238 | ||
165 | /* All we can safely do with an existing domain is enlarge it */ | 239 | /* start_pfn is always nonzero for an already-initialised domain */ |
166 | if (iovad->start_pfn) { | 240 | if (iovad->start_pfn) { |
167 | if (1UL << order != iovad->granule || | 241 | if (1UL << order != iovad->granule || |
168 | base_pfn != iovad->start_pfn || | 242 | base_pfn != iovad->start_pfn) { |
169 | end_pfn < iovad->dma_32bit_pfn) { | ||
170 | pr_warn("Incompatible range for DMA domain\n"); | 243 | pr_warn("Incompatible range for DMA domain\n"); |
171 | return -EFAULT; | 244 | return -EFAULT; |
172 | } | 245 | } |
173 | iovad->dma_32bit_pfn = end_pfn; | 246 | /* |
247 | * If we have devices with different DMA masks, move the free | ||
248 | * area cache limit down for the benefit of the smaller one. | ||
249 | */ | ||
250 | iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); | ||
174 | } else { | 251 | } else { |
175 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); | 252 | init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); |
176 | if (dev && dev_is_pci(dev)) | 253 | if (pci) |
177 | iova_reserve_pci_windows(to_pci_dev(dev), iovad); | 254 | iova_reserve_pci_windows(to_pci_dev(dev), iovad); |
178 | } | 255 | } |
179 | return 0; | 256 | return 0; |
@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
181 | EXPORT_SYMBOL(iommu_dma_init_domain); | 258 | EXPORT_SYMBOL(iommu_dma_init_domain); |
182 | 259 | ||
183 | /** | 260 | /** |
184 | * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags | 261 | * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API |
262 | * page flags. | ||
185 | * @dir: Direction of DMA transfer | 263 | * @dir: Direction of DMA transfer |
186 | * @coherent: Is the DMA master cache-coherent? | 264 | * @coherent: Is the DMA master cache-coherent? |
265 | * @attrs: DMA attributes for the mapping | ||
187 | * | 266 | * |
188 | * Return: corresponding IOMMU API page protection flags | 267 | * Return: corresponding IOMMU API page protection flags |
189 | */ | 268 | */ |
190 | int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | 269 | int dma_info_to_prot(enum dma_data_direction dir, bool coherent, |
270 | unsigned long attrs) | ||
191 | { | 271 | { |
192 | int prot = coherent ? IOMMU_CACHE : 0; | 272 | int prot = coherent ? IOMMU_CACHE : 0; |
193 | 273 | ||
274 | if (attrs & DMA_ATTR_PRIVILEGED) | ||
275 | prot |= IOMMU_PRIV; | ||
276 | |||
194 | switch (dir) { | 277 | switch (dir) { |
195 | case DMA_BIDIRECTIONAL: | 278 | case DMA_BIDIRECTIONAL: |
196 | return prot | IOMMU_READ | IOMMU_WRITE; | 279 | return prot | IOMMU_READ | IOMMU_WRITE; |
@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | |||
204 | } | 287 | } |
205 | 288 | ||
206 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, | 289 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, |
207 | dma_addr_t dma_limit) | 290 | dma_addr_t dma_limit, struct device *dev) |
208 | { | 291 | { |
209 | struct iova_domain *iovad = cookie_iovad(domain); | 292 | struct iova_domain *iovad = cookie_iovad(domain); |
210 | unsigned long shift = iova_shift(iovad); | 293 | unsigned long shift = iova_shift(iovad); |
211 | unsigned long length = iova_align(iovad, size) >> shift; | 294 | unsigned long length = iova_align(iovad, size) >> shift; |
295 | struct iova *iova = NULL; | ||
212 | 296 | ||
213 | if (domain->geometry.force_aperture) | 297 | if (domain->geometry.force_aperture) |
214 | dma_limit = min(dma_limit, domain->geometry.aperture_end); | 298 | dma_limit = min(dma_limit, domain->geometry.aperture_end); |
299 | |||
300 | /* Try to get PCI devices a SAC address */ | ||
301 | if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) | ||
302 | iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift, | ||
303 | true); | ||
215 | /* | 304 | /* |
216 | * Enforce size-alignment to be safe - there could perhaps be an | 305 | * Enforce size-alignment to be safe - there could perhaps be an |
217 | * attribute to control this per-device, or at least per-domain... | 306 | * attribute to control this per-device, or at least per-domain... |
218 | */ | 307 | */ |
219 | return alloc_iova(iovad, length, dma_limit >> shift, true); | 308 | if (!iova) |
309 | iova = alloc_iova(iovad, length, dma_limit >> shift, true); | ||
310 | |||
311 | return iova; | ||
220 | } | 312 | } |
221 | 313 | ||
222 | /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ | 314 | /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ |
@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
369 | if (!pages) | 461 | if (!pages) |
370 | return NULL; | 462 | return NULL; |
371 | 463 | ||
372 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask); | 464 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev); |
373 | if (!iova) | 465 | if (!iova) |
374 | goto out_free_pages; | 466 | goto out_free_pages; |
375 | 467 | ||
@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, | |||
440 | struct iova_domain *iovad = cookie_iovad(domain); | 532 | struct iova_domain *iovad = cookie_iovad(domain); |
441 | size_t iova_off = iova_offset(iovad, phys); | 533 | size_t iova_off = iova_offset(iovad, phys); |
442 | size_t len = iova_align(iovad, size + iova_off); | 534 | size_t len = iova_align(iovad, size + iova_off); |
443 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); | 535 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev); |
444 | 536 | ||
445 | if (!iova) | 537 | if (!iova) |
446 | return DMA_ERROR_CODE; | 538 | return DMA_ERROR_CODE; |
@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
598 | prev = s; | 690 | prev = s; |
599 | } | 691 | } |
600 | 692 | ||
601 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); | 693 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev); |
602 | if (!iova) | 694 | if (!iova) |
603 | goto out_restore_sg; | 695 | goto out_restore_sg; |
604 | 696 | ||
@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, | |||
633 | size_t size, enum dma_data_direction dir, unsigned long attrs) | 725 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
634 | { | 726 | { |
635 | return __iommu_dma_map(dev, phys, size, | 727 | return __iommu_dma_map(dev, phys, size, |
636 | dma_direction_to_prot(dir, false) | IOMMU_MMIO); | 728 | dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); |
637 | } | 729 | } |
638 | 730 | ||
639 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | 731 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | |||
642 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); | 734 | __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); |
643 | } | 735 | } |
644 | 736 | ||
645 | int iommu_dma_supported(struct device *dev, u64 mask) | ||
646 | { | ||
647 | /* | ||
648 | * 'Special' IOMMUs which don't have the same addressing capability | ||
649 | * as the CPU will have to wait until we have some way to query that | ||
650 | * before they'll be able to use this framework. | ||
651 | */ | ||
652 | return 1; | ||
653 | } | ||
654 | |||
655 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 737 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
656 | { | 738 | { |
657 | return dma_addr == DMA_ERROR_CODE; | 739 | return dma_addr == DMA_ERROR_CODE; |
@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | |||
662 | { | 744 | { |
663 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | 745 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
664 | struct iommu_dma_msi_page *msi_page; | 746 | struct iommu_dma_msi_page *msi_page; |
665 | struct iova_domain *iovad = &cookie->iovad; | 747 | struct iova_domain *iovad = cookie_iovad(domain); |
666 | struct iova *iova; | 748 | struct iova *iova; |
667 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | 749 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
750 | size_t size = cookie_msi_granule(cookie); | ||
668 | 751 | ||
669 | msi_addr &= ~(phys_addr_t)iova_mask(iovad); | 752 | msi_addr &= ~(phys_addr_t)(size - 1); |
670 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) | 753 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
671 | if (msi_page->phys == msi_addr) | 754 | if (msi_page->phys == msi_addr) |
672 | return msi_page; | 755 | return msi_page; |
@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | |||
675 | if (!msi_page) | 758 | if (!msi_page) |
676 | return NULL; | 759 | return NULL; |
677 | 760 | ||
678 | iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); | ||
679 | if (!iova) | ||
680 | goto out_free_page; | ||
681 | |||
682 | msi_page->phys = msi_addr; | 761 | msi_page->phys = msi_addr; |
683 | msi_page->iova = iova_dma_addr(iovad, iova); | 762 | if (iovad) { |
684 | if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) | 763 | iova = __alloc_iova(domain, size, dma_get_mask(dev), dev); |
764 | if (!iova) | ||
765 | goto out_free_page; | ||
766 | msi_page->iova = iova_dma_addr(iovad, iova); | ||
767 | } else { | ||
768 | msi_page->iova = cookie->msi_iova; | ||
769 | cookie->msi_iova += size; | ||
770 | } | ||
771 | |||
772 | if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) | ||
685 | goto out_free_iova; | 773 | goto out_free_iova; |
686 | 774 | ||
687 | INIT_LIST_HEAD(&msi_page->list); | 775 | INIT_LIST_HEAD(&msi_page->list); |
@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, | |||
689 | return msi_page; | 777 | return msi_page; |
690 | 778 | ||
691 | out_free_iova: | 779 | out_free_iova: |
692 | __free_iova(iovad, iova); | 780 | if (iovad) |
781 | __free_iova(iovad, iova); | ||
782 | else | ||
783 | cookie->msi_iova -= size; | ||
693 | out_free_page: | 784 | out_free_page: |
694 | kfree(msi_page); | 785 | kfree(msi_page); |
695 | return NULL; | 786 | return NULL; |
@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | |||
730 | msg->data = ~0U; | 821 | msg->data = ~0U; |
731 | } else { | 822 | } else { |
732 | msg->address_hi = upper_32_bits(msi_page->iova); | 823 | msg->address_hi = upper_32_bits(msi_page->iova); |
733 | msg->address_lo &= iova_mask(&cookie->iovad); | 824 | msg->address_lo &= cookie_msi_granule(cookie) - 1; |
734 | msg->address_lo += lower_32_bits(msi_page->iova); | 825 | msg->address_lo += lower_32_bits(msi_page->iova); |
735 | } | 826 | } |
736 | } | 827 | } |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 8ccbd7023194..d9c0decfc91a 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; | |||
74 | static int alloc_iommu(struct dmar_drhd_unit *drhd); | 74 | static int alloc_iommu(struct dmar_drhd_unit *drhd); |
75 | static void free_iommu(struct intel_iommu *iommu); | 75 | static void free_iommu(struct intel_iommu *iommu); |
76 | 76 | ||
77 | extern const struct iommu_ops intel_iommu_ops; | ||
78 | |||
77 | static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) | 79 | static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) |
78 | { | 80 | { |
79 | /* | 81 | /* |
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) | |||
1078 | raw_spin_lock_init(&iommu->register_lock); | 1080 | raw_spin_lock_init(&iommu->register_lock); |
1079 | 1081 | ||
1080 | if (intel_iommu_enabled) { | 1082 | if (intel_iommu_enabled) { |
1081 | iommu->iommu_dev = iommu_device_create(NULL, iommu, | 1083 | err = iommu_device_sysfs_add(&iommu->iommu, NULL, |
1082 | intel_iommu_groups, | 1084 | intel_iommu_groups, |
1083 | "%s", iommu->name); | 1085 | "%s", iommu->name); |
1086 | if (err) | ||
1087 | goto err_unmap; | ||
1084 | 1088 | ||
1085 | if (IS_ERR(iommu->iommu_dev)) { | 1089 | iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); |
1086 | err = PTR_ERR(iommu->iommu_dev); | 1090 | |
1091 | err = iommu_device_register(&iommu->iommu); | ||
1092 | if (err) | ||
1087 | goto err_unmap; | 1093 | goto err_unmap; |
1088 | } | ||
1089 | } | 1094 | } |
1090 | 1095 | ||
1091 | drhd->iommu = iommu; | 1096 | drhd->iommu = iommu; |
@@ -1103,7 +1108,8 @@ error: | |||
1103 | 1108 | ||
1104 | static void free_iommu(struct intel_iommu *iommu) | 1109 | static void free_iommu(struct intel_iommu *iommu) |
1105 | { | 1110 | { |
1106 | iommu_device_destroy(iommu->iommu_dev); | 1111 | iommu_device_sysfs_remove(&iommu->iommu); |
1112 | iommu_device_unregister(&iommu->iommu); | ||
1107 | 1113 | ||
1108 | if (iommu->irq) { | 1114 | if (iommu->irq) { |
1109 | if (iommu->pr_irq) { | 1115 | if (iommu->pr_irq) { |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 57ba0d3091ea..a7e0821c9967 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -276,6 +276,8 @@ struct sysmmu_drvdata { | |||
276 | struct list_head owner_node; /* node for owner controllers list */ | 276 | struct list_head owner_node; /* node for owner controllers list */ |
277 | phys_addr_t pgtable; /* assigned page table structure */ | 277 | phys_addr_t pgtable; /* assigned page table structure */ |
278 | unsigned int version; /* our version */ | 278 | unsigned int version; /* our version */ |
279 | |||
280 | struct iommu_device iommu; /* IOMMU core handle */ | ||
279 | }; | 281 | }; |
280 | 282 | ||
281 | static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) | 283 | static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) |
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data, | |||
381 | { | 383 | { |
382 | sysmmu_pte_t *ent; | 384 | sysmmu_pte_t *ent; |
383 | 385 | ||
384 | dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n", | 386 | dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n", |
385 | finfo->name, fault_addr, &data->pgtable); | 387 | dev_name(data->master), finfo->name, fault_addr); |
388 | dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); | ||
386 | ent = section_entry(phys_to_virt(data->pgtable), fault_addr); | 389 | ent = section_entry(phys_to_virt(data->pgtable), fault_addr); |
387 | dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent); | 390 | dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); |
388 | if (lv1ent_page(ent)) { | 391 | if (lv1ent_page(ent)) { |
389 | ent = page_entry(ent, fault_addr); | 392 | ent = page_entry(ent, fault_addr); |
390 | dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); | 393 | dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); |
391 | } | 394 | } |
392 | } | 395 | } |
393 | 396 | ||
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) | |||
611 | data->sysmmu = dev; | 614 | data->sysmmu = dev; |
612 | spin_lock_init(&data->lock); | 615 | spin_lock_init(&data->lock); |
613 | 616 | ||
617 | ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, | ||
618 | dev_name(data->sysmmu)); | ||
619 | if (ret) | ||
620 | return ret; | ||
621 | |||
622 | iommu_device_set_ops(&data->iommu, &exynos_iommu_ops); | ||
623 | iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode); | ||
624 | |||
625 | ret = iommu_device_register(&data->iommu); | ||
626 | if (ret) | ||
627 | return ret; | ||
628 | |||
614 | platform_set_drvdata(pdev, data); | 629 | platform_set_drvdata(pdev, data); |
615 | 630 | ||
616 | __sysmmu_get_version(data); | 631 | __sysmmu_get_version(data); |
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) | |||
628 | 643 | ||
629 | pm_runtime_enable(dev); | 644 | pm_runtime_enable(dev); |
630 | 645 | ||
631 | of_iommu_set_ops(dev->of_node, &exynos_iommu_ops); | ||
632 | |||
633 | return 0; | 646 | return 0; |
634 | } | 647 | } |
635 | 648 | ||
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) | |||
743 | DMA_TO_DEVICE); | 756 | DMA_TO_DEVICE); |
744 | /* For mapping page table entries we rely on dma == phys */ | 757 | /* For mapping page table entries we rely on dma == phys */ |
745 | BUG_ON(handle != virt_to_phys(domain->pgtable)); | 758 | BUG_ON(handle != virt_to_phys(domain->pgtable)); |
759 | if (dma_mapping_error(dma_dev, handle)) | ||
760 | goto err_lv2ent; | ||
746 | 761 | ||
747 | spin_lock_init(&domain->lock); | 762 | spin_lock_init(&domain->lock); |
748 | spin_lock_init(&domain->pgtablelock); | 763 | spin_lock_init(&domain->pgtablelock); |
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) | |||
754 | 769 | ||
755 | return &domain->domain; | 770 | return &domain->domain; |
756 | 771 | ||
772 | err_lv2ent: | ||
773 | free_pages((unsigned long)domain->lv2entcnt, 1); | ||
757 | err_counter: | 774 | err_counter: |
758 | free_pages((unsigned long)domain->pgtable, 2); | 775 | free_pages((unsigned long)domain->pgtable, 2); |
759 | err_dma_cookie: | 776 | err_dma_cookie: |
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, | |||
897 | } | 914 | } |
898 | 915 | ||
899 | if (lv1ent_fault(sent)) { | 916 | if (lv1ent_fault(sent)) { |
917 | dma_addr_t handle; | ||
900 | sysmmu_pte_t *pent; | 918 | sysmmu_pte_t *pent; |
901 | bool need_flush_flpd_cache = lv1ent_zero(sent); | 919 | bool need_flush_flpd_cache = lv1ent_zero(sent); |
902 | 920 | ||
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, | |||
908 | update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); | 926 | update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); |
909 | kmemleak_ignore(pent); | 927 | kmemleak_ignore(pent); |
910 | *pgcounter = NUM_LV2ENTRIES; | 928 | *pgcounter = NUM_LV2ENTRIES; |
911 | dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); | 929 | handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE, |
930 | DMA_TO_DEVICE); | ||
931 | if (dma_mapping_error(dma_dev, handle)) { | ||
932 | kmem_cache_free(lv2table_kmem_cache, pent); | ||
933 | return ERR_PTR(-EADDRINUSE); | ||
934 | } | ||
912 | 935 | ||
913 | /* | 936 | /* |
914 | * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, | 937 | * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, |
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev) | |||
1231 | 1254 | ||
1232 | static void exynos_iommu_remove_device(struct device *dev) | 1255 | static void exynos_iommu_remove_device(struct device *dev) |
1233 | { | 1256 | { |
1257 | struct exynos_iommu_owner *owner = dev->archdata.iommu; | ||
1258 | |||
1234 | if (!has_sysmmu(dev)) | 1259 | if (!has_sysmmu(dev)) |
1235 | return; | 1260 | return; |
1236 | 1261 | ||
1262 | if (owner->domain) { | ||
1263 | struct iommu_group *group = iommu_group_get(dev); | ||
1264 | |||
1265 | if (group) { | ||
1266 | WARN_ON(owner->domain != | ||
1267 | iommu_group_default_domain(group)); | ||
1268 | exynos_iommu_detach_device(owner->domain, dev); | ||
1269 | iommu_group_put(group); | ||
1270 | } | ||
1271 | } | ||
1237 | iommu_group_remove_device(dev); | 1272 | iommu_group_remove_device(dev); |
1238 | } | 1273 | } |
1239 | 1274 | ||
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev, | |||
1242 | { | 1277 | { |
1243 | struct exynos_iommu_owner *owner = dev->archdata.iommu; | 1278 | struct exynos_iommu_owner *owner = dev->archdata.iommu; |
1244 | struct platform_device *sysmmu = of_find_device_by_node(spec->np); | 1279 | struct platform_device *sysmmu = of_find_device_by_node(spec->np); |
1245 | struct sysmmu_drvdata *data; | 1280 | struct sysmmu_drvdata *data, *entry; |
1246 | 1281 | ||
1247 | if (!sysmmu) | 1282 | if (!sysmmu) |
1248 | return -ENODEV; | 1283 | return -ENODEV; |
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev, | |||
1261 | dev->archdata.iommu = owner; | 1296 | dev->archdata.iommu = owner; |
1262 | } | 1297 | } |
1263 | 1298 | ||
1299 | list_for_each_entry(entry, &owner->controllers, owner_node) | ||
1300 | if (entry == data) | ||
1301 | return 0; | ||
1302 | |||
1264 | list_add_tail(&data->owner_node, &owner->controllers); | 1303 | list_add_tail(&data->owner_node, &owner->controllers); |
1265 | data->master = dev; | 1304 | data->master = dev; |
1266 | 1305 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 8a185250ae5a..f5e02f8e7371 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit { | |||
440 | u64 end_address; /* reserved end address */ | 440 | u64 end_address; /* reserved end address */ |
441 | struct dmar_dev_scope *devices; /* target devices */ | 441 | struct dmar_dev_scope *devices; /* target devices */ |
442 | int devices_cnt; /* target device count */ | 442 | int devices_cnt; /* target device count */ |
443 | struct iommu_resv_region *resv; /* reserved region handle */ | ||
443 | }; | 444 | }; |
444 | 445 | ||
445 | struct dmar_atsr_unit { | 446 | struct dmar_atsr_unit { |
@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | |||
547 | static DEFINE_SPINLOCK(device_domain_lock); | 548 | static DEFINE_SPINLOCK(device_domain_lock); |
548 | static LIST_HEAD(device_domain_list); | 549 | static LIST_HEAD(device_domain_list); |
549 | 550 | ||
550 | static const struct iommu_ops intel_iommu_ops; | 551 | const struct iommu_ops intel_iommu_ops; |
551 | 552 | ||
552 | static bool translation_pre_enabled(struct intel_iommu *iommu) | 553 | static bool translation_pre_enabled(struct intel_iommu *iommu) |
553 | { | 554 | { |
@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, | |||
1144 | if (!dma_pte_present(pte) || dma_pte_superpage(pte)) | 1145 | if (!dma_pte_present(pte) || dma_pte_superpage(pte)) |
1145 | goto next; | 1146 | goto next; |
1146 | 1147 | ||
1147 | level_pfn = pfn & level_mask(level - 1); | 1148 | level_pfn = pfn & level_mask(level); |
1148 | level_pte = phys_to_virt(dma_pte_addr(pte)); | 1149 | level_pte = phys_to_virt(dma_pte_addr(pte)); |
1149 | 1150 | ||
1150 | if (level > 2) | 1151 | if (level > 2) |
@@ -3325,13 +3326,14 @@ static int __init init_dmars(void) | |||
3325 | iommu_identity_mapping |= IDENTMAP_GFX; | 3326 | iommu_identity_mapping |= IDENTMAP_GFX; |
3326 | #endif | 3327 | #endif |
3327 | 3328 | ||
3329 | check_tylersburg_isoch(); | ||
3330 | |||
3328 | if (iommu_identity_mapping) { | 3331 | if (iommu_identity_mapping) { |
3329 | ret = si_domain_init(hw_pass_through); | 3332 | ret = si_domain_init(hw_pass_through); |
3330 | if (ret) | 3333 | if (ret) |
3331 | goto free_iommu; | 3334 | goto free_iommu; |
3332 | } | 3335 | } |
3333 | 3336 | ||
3334 | check_tylersburg_isoch(); | ||
3335 | 3337 | ||
3336 | /* | 3338 | /* |
3337 | * If we copied translations from a previous kernel in the kdump | 3339 | * If we copied translations from a previous kernel in the kdump |
@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {} | |||
4246 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) | 4248 | int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) |
4247 | { | 4249 | { |
4248 | struct acpi_dmar_reserved_memory *rmrr; | 4250 | struct acpi_dmar_reserved_memory *rmrr; |
4251 | int prot = DMA_PTE_READ|DMA_PTE_WRITE; | ||
4249 | struct dmar_rmrr_unit *rmrru; | 4252 | struct dmar_rmrr_unit *rmrru; |
4253 | size_t length; | ||
4250 | 4254 | ||
4251 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); | 4255 | rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); |
4252 | if (!rmrru) | 4256 | if (!rmrru) |
4253 | return -ENOMEM; | 4257 | goto out; |
4254 | 4258 | ||
4255 | rmrru->hdr = header; | 4259 | rmrru->hdr = header; |
4256 | rmrr = (struct acpi_dmar_reserved_memory *)header; | 4260 | rmrr = (struct acpi_dmar_reserved_memory *)header; |
4257 | rmrru->base_address = rmrr->base_address; | 4261 | rmrru->base_address = rmrr->base_address; |
4258 | rmrru->end_address = rmrr->end_address; | 4262 | rmrru->end_address = rmrr->end_address; |
4263 | |||
4264 | length = rmrr->end_address - rmrr->base_address + 1; | ||
4265 | rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, | ||
4266 | IOMMU_RESV_DIRECT); | ||
4267 | if (!rmrru->resv) | ||
4268 | goto free_rmrru; | ||
4269 | |||
4259 | rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), | 4270 | rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), |
4260 | ((void *)rmrr) + rmrr->header.length, | 4271 | ((void *)rmrr) + rmrr->header.length, |
4261 | &rmrru->devices_cnt); | 4272 | &rmrru->devices_cnt); |
4262 | if (rmrru->devices_cnt && rmrru->devices == NULL) { | 4273 | if (rmrru->devices_cnt && rmrru->devices == NULL) |
4263 | kfree(rmrru); | 4274 | goto free_all; |
4264 | return -ENOMEM; | ||
4265 | } | ||
4266 | 4275 | ||
4267 | list_add(&rmrru->list, &dmar_rmrr_units); | 4276 | list_add(&rmrru->list, &dmar_rmrr_units); |
4268 | 4277 | ||
4269 | return 0; | 4278 | return 0; |
4279 | free_all: | ||
4280 | kfree(rmrru->resv); | ||
4281 | free_rmrru: | ||
4282 | kfree(rmrru); | ||
4283 | out: | ||
4284 | return -ENOMEM; | ||
4270 | } | 4285 | } |
4271 | 4286 | ||
4272 | static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) | 4287 | static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) |
@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void) | |||
4480 | list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { | 4495 | list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { |
4481 | list_del(&rmrru->list); | 4496 | list_del(&rmrru->list); |
4482 | dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); | 4497 | dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); |
4498 | kfree(rmrru->resv); | ||
4483 | kfree(rmrru); | 4499 | kfree(rmrru); |
4484 | } | 4500 | } |
4485 | 4501 | ||
@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void) | |||
4853 | 4869 | ||
4854 | init_iommu_pm_ops(); | 4870 | init_iommu_pm_ops(); |
4855 | 4871 | ||
4856 | for_each_active_iommu(iommu, drhd) | 4872 | for_each_active_iommu(iommu, drhd) { |
4857 | iommu->iommu_dev = iommu_device_create(NULL, iommu, | 4873 | iommu_device_sysfs_add(&iommu->iommu, NULL, |
4858 | intel_iommu_groups, | 4874 | intel_iommu_groups, |
4859 | "%s", iommu->name); | 4875 | "%s", iommu->name); |
4876 | iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); | ||
4877 | iommu_device_register(&iommu->iommu); | ||
4878 | } | ||
4860 | 4879 | ||
4861 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); | 4880 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); |
4862 | bus_register_notifier(&pci_bus_type, &device_nb); | 4881 | bus_register_notifier(&pci_bus_type, &device_nb); |
@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev) | |||
5178 | if (!iommu) | 5197 | if (!iommu) |
5179 | return -ENODEV; | 5198 | return -ENODEV; |
5180 | 5199 | ||
5181 | iommu_device_link(iommu->iommu_dev, dev); | 5200 | iommu_device_link(&iommu->iommu, dev); |
5182 | 5201 | ||
5183 | group = iommu_group_get_for_dev(dev); | 5202 | group = iommu_group_get_for_dev(dev); |
5184 | 5203 | ||
@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev) | |||
5200 | 5219 | ||
5201 | iommu_group_remove_device(dev); | 5220 | iommu_group_remove_device(dev); |
5202 | 5221 | ||
5203 | iommu_device_unlink(iommu->iommu_dev, dev); | 5222 | iommu_device_unlink(&iommu->iommu, dev); |
5223 | } | ||
5224 | |||
5225 | static void intel_iommu_get_resv_regions(struct device *device, | ||
5226 | struct list_head *head) | ||
5227 | { | ||
5228 | struct iommu_resv_region *reg; | ||
5229 | struct dmar_rmrr_unit *rmrr; | ||
5230 | struct device *i_dev; | ||
5231 | int i; | ||
5232 | |||
5233 | rcu_read_lock(); | ||
5234 | for_each_rmrr_units(rmrr) { | ||
5235 | for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, | ||
5236 | i, i_dev) { | ||
5237 | if (i_dev != device) | ||
5238 | continue; | ||
5239 | |||
5240 | list_add_tail(&rmrr->resv->list, head); | ||
5241 | } | ||
5242 | } | ||
5243 | rcu_read_unlock(); | ||
5244 | |||
5245 | reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, | ||
5246 | IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, | ||
5247 | 0, IOMMU_RESV_RESERVED); | ||
5248 | if (!reg) | ||
5249 | return; | ||
5250 | list_add_tail(®->list, head); | ||
5251 | } | ||
5252 | |||
5253 | static void intel_iommu_put_resv_regions(struct device *dev, | ||
5254 | struct list_head *head) | ||
5255 | { | ||
5256 | struct iommu_resv_region *entry, *next; | ||
5257 | |||
5258 | list_for_each_entry_safe(entry, next, head, list) { | ||
5259 | if (entry->type == IOMMU_RESV_RESERVED) | ||
5260 | kfree(entry); | ||
5261 | } | ||
5204 | } | 5262 | } |
5205 | 5263 | ||
5206 | #ifdef CONFIG_INTEL_IOMMU_SVM | 5264 | #ifdef CONFIG_INTEL_IOMMU_SVM |
@@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) | |||
5332 | } | 5390 | } |
5333 | #endif /* CONFIG_INTEL_IOMMU_SVM */ | 5391 | #endif /* CONFIG_INTEL_IOMMU_SVM */ |
5334 | 5392 | ||
5335 | static const struct iommu_ops intel_iommu_ops = { | 5393 | const struct iommu_ops intel_iommu_ops = { |
5336 | .capable = intel_iommu_capable, | 5394 | .capable = intel_iommu_capable, |
5337 | .domain_alloc = intel_iommu_domain_alloc, | 5395 | .domain_alloc = intel_iommu_domain_alloc, |
5338 | .domain_free = intel_iommu_domain_free, | 5396 | .domain_free = intel_iommu_domain_free, |
5339 | .attach_dev = intel_iommu_attach_device, | 5397 | .attach_dev = intel_iommu_attach_device, |
5340 | .detach_dev = intel_iommu_detach_device, | 5398 | .detach_dev = intel_iommu_detach_device, |
5341 | .map = intel_iommu_map, | 5399 | .map = intel_iommu_map, |
5342 | .unmap = intel_iommu_unmap, | 5400 | .unmap = intel_iommu_unmap, |
5343 | .map_sg = default_iommu_map_sg, | 5401 | .map_sg = default_iommu_map_sg, |
5344 | .iova_to_phys = intel_iommu_iova_to_phys, | 5402 | .iova_to_phys = intel_iommu_iova_to_phys, |
5345 | .add_device = intel_iommu_add_device, | 5403 | .add_device = intel_iommu_add_device, |
5346 | .remove_device = intel_iommu_remove_device, | 5404 | .remove_device = intel_iommu_remove_device, |
5347 | .device_group = pci_device_group, | 5405 | .get_resv_regions = intel_iommu_get_resv_regions, |
5348 | .pgsize_bitmap = INTEL_IOMMU_PGSIZES, | 5406 | .put_resv_regions = intel_iommu_put_resv_regions, |
5407 | .device_group = pci_device_group, | ||
5408 | .pgsize_bitmap = INTEL_IOMMU_PGSIZES, | ||
5349 | }; | 5409 | }; |
5350 | 5410 | ||
5351 | static void quirk_iommu_g4x_gfx(struct pci_dev *dev) | 5411 | static void quirk_iommu_g4x_gfx(struct pci_dev *dev) |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 0769276c0537..1c049e2e12bf 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, | |||
265 | if (!(prot & IOMMU_MMIO)) | 265 | if (!(prot & IOMMU_MMIO)) |
266 | pte |= ARM_V7S_ATTR_TEX(1); | 266 | pte |= ARM_V7S_ATTR_TEX(1); |
267 | if (ap) { | 267 | if (ap) { |
268 | pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV; | 268 | pte |= ARM_V7S_PTE_AF; |
269 | if (!(prot & IOMMU_PRIV)) | ||
270 | pte |= ARM_V7S_PTE_AP_UNPRIV; | ||
269 | if (!(prot & IOMMU_WRITE)) | 271 | if (!(prot & IOMMU_WRITE)) |
270 | pte |= ARM_V7S_PTE_AP_RDONLY; | 272 | pte |= ARM_V7S_PTE_AP_RDONLY; |
271 | } | 273 | } |
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) | |||
288 | 290 | ||
289 | if (!(attr & ARM_V7S_PTE_AP_RDONLY)) | 291 | if (!(attr & ARM_V7S_PTE_AP_RDONLY)) |
290 | prot |= IOMMU_WRITE; | 292 | prot |= IOMMU_WRITE; |
293 | if (!(attr & ARM_V7S_PTE_AP_UNPRIV)) | ||
294 | prot |= IOMMU_PRIV; | ||
291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) | 295 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) |
292 | prot |= IOMMU_MMIO; | 296 | prot |= IOMMU_MMIO; |
293 | else if (pte & ARM_V7S_ATTR_C) | 297 | else if (pte & ARM_V7S_ATTR_C) |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a40ce3406fef..feacc54bec68 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, | |||
350 | 350 | ||
351 | if (data->iop.fmt == ARM_64_LPAE_S1 || | 351 | if (data->iop.fmt == ARM_64_LPAE_S1 || |
352 | data->iop.fmt == ARM_32_LPAE_S1) { | 352 | data->iop.fmt == ARM_32_LPAE_S1) { |
353 | pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; | 353 | pte = ARM_LPAE_PTE_nG; |
354 | 354 | ||
355 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) | 355 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) |
356 | pte |= ARM_LPAE_PTE_AP_RDONLY; | 356 | pte |= ARM_LPAE_PTE_AP_RDONLY; |
357 | 357 | ||
358 | if (!(prot & IOMMU_PRIV)) | ||
359 | pte |= ARM_LPAE_PTE_AP_UNPRIV; | ||
360 | |||
358 | if (prot & IOMMU_MMIO) | 361 | if (prot & IOMMU_MMIO) |
359 | pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV | 362 | pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV |
360 | << ARM_LPAE_PTE_ATTRINDX_SHIFT); | 363 | << ARM_LPAE_PTE_ATTRINDX_SHIFT); |
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index 39b2d9127dbf..c58351ed61c1 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c | |||
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void) | |||
50 | postcore_initcall(iommu_dev_init); | 50 | postcore_initcall(iommu_dev_init); |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Create an IOMMU device and return a pointer to it. IOMMU specific | 53 | * Init the struct device for the IOMMU. IOMMU specific attributes can |
54 | * attributes can be provided as an attribute group, allowing a unique | 54 | * be provided as an attribute group, allowing a unique namespace per |
55 | * namespace per IOMMU type. | 55 | * IOMMU type. |
56 | */ | 56 | */ |
57 | struct device *iommu_device_create(struct device *parent, void *drvdata, | 57 | int iommu_device_sysfs_add(struct iommu_device *iommu, |
58 | const struct attribute_group **groups, | 58 | struct device *parent, |
59 | const char *fmt, ...) | 59 | const struct attribute_group **groups, |
60 | const char *fmt, ...) | ||
60 | { | 61 | { |
61 | struct device *dev; | ||
62 | va_list vargs; | 62 | va_list vargs; |
63 | int ret; | 63 | int ret; |
64 | 64 | ||
65 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 65 | device_initialize(&iommu->dev); |
66 | if (!dev) | ||
67 | return ERR_PTR(-ENOMEM); | ||
68 | 66 | ||
69 | device_initialize(dev); | 67 | iommu->dev.class = &iommu_class; |
70 | 68 | iommu->dev.parent = parent; | |
71 | dev->class = &iommu_class; | 69 | iommu->dev.groups = groups; |
72 | dev->parent = parent; | ||
73 | dev->groups = groups; | ||
74 | dev_set_drvdata(dev, drvdata); | ||
75 | 70 | ||
76 | va_start(vargs, fmt); | 71 | va_start(vargs, fmt); |
77 | ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs); | 72 | ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); |
78 | va_end(vargs); | 73 | va_end(vargs); |
79 | if (ret) | 74 | if (ret) |
80 | goto error; | 75 | goto error; |
81 | 76 | ||
82 | ret = device_add(dev); | 77 | ret = device_add(&iommu->dev); |
83 | if (ret) | 78 | if (ret) |
84 | goto error; | 79 | goto error; |
85 | 80 | ||
86 | return dev; | 81 | return 0; |
87 | 82 | ||
88 | error: | 83 | error: |
89 | put_device(dev); | 84 | put_device(&iommu->dev); |
90 | return ERR_PTR(ret); | 85 | return ret; |
91 | } | 86 | } |
92 | 87 | ||
93 | void iommu_device_destroy(struct device *dev) | 88 | void iommu_device_sysfs_remove(struct iommu_device *iommu) |
94 | { | 89 | { |
95 | if (!dev || IS_ERR(dev)) | 90 | device_unregister(&iommu->dev); |
96 | return; | ||
97 | |||
98 | device_unregister(dev); | ||
99 | } | 91 | } |
100 | |||
101 | /* | 92 | /* |
102 | * IOMMU drivers can indicate a device is managed by a given IOMMU using | 93 | * IOMMU drivers can indicate a device is managed by a given IOMMU using |
103 | * this interface. A link to the device will be created in the "devices" | 94 | * this interface. A link to the device will be created in the "devices" |
104 | * directory of the IOMMU device in sysfs and an "iommu" link will be | 95 | * directory of the IOMMU device in sysfs and an "iommu" link will be |
105 | * created under the linked device, pointing back at the IOMMU device. | 96 | * created under the linked device, pointing back at the IOMMU device. |
106 | */ | 97 | */ |
107 | int iommu_device_link(struct device *dev, struct device *link) | 98 | int iommu_device_link(struct iommu_device *iommu, struct device *link) |
108 | { | 99 | { |
109 | int ret; | 100 | int ret; |
110 | 101 | ||
111 | if (!dev || IS_ERR(dev)) | 102 | if (!iommu || IS_ERR(iommu)) |
112 | return -ENODEV; | 103 | return -ENODEV; |
113 | 104 | ||
114 | ret = sysfs_add_link_to_group(&dev->kobj, "devices", | 105 | ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", |
115 | &link->kobj, dev_name(link)); | 106 | &link->kobj, dev_name(link)); |
116 | if (ret) | 107 | if (ret) |
117 | return ret; | 108 | return ret; |
118 | 109 | ||
119 | ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu"); | 110 | ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); |
120 | if (ret) | 111 | if (ret) |
121 | sysfs_remove_link_from_group(&dev->kobj, "devices", | 112 | sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", |
122 | dev_name(link)); | 113 | dev_name(link)); |
123 | 114 | ||
124 | return ret; | 115 | return ret; |
125 | } | 116 | } |
126 | 117 | ||
127 | void iommu_device_unlink(struct device *dev, struct device *link) | 118 | void iommu_device_unlink(struct iommu_device *iommu, struct device *link) |
128 | { | 119 | { |
129 | if (!dev || IS_ERR(dev)) | 120 | if (!iommu || IS_ERR(iommu)) |
130 | return; | 121 | return; |
131 | 122 | ||
132 | sysfs_remove_link(&link->kobj, "iommu"); | 123 | sysfs_remove_link(&link->kobj, "iommu"); |
133 | sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link)); | 124 | sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); |
134 | } | 125 | } |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index dbe7f653bb7c..8ea14f41a979 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -55,7 +55,7 @@ struct iommu_group { | |||
55 | struct iommu_domain *domain; | 55 | struct iommu_domain *domain; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | struct iommu_device { | 58 | struct group_device { |
59 | struct list_head list; | 59 | struct list_head list; |
60 | struct device *dev; | 60 | struct device *dev; |
61 | char *name; | 61 | char *name; |
@@ -68,6 +68,12 @@ struct iommu_group_attribute { | |||
68 | const char *buf, size_t count); | 68 | const char *buf, size_t count); |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static const char * const iommu_group_resv_type_string[] = { | ||
72 | [IOMMU_RESV_DIRECT] = "direct", | ||
73 | [IOMMU_RESV_RESERVED] = "reserved", | ||
74 | [IOMMU_RESV_MSI] = "msi", | ||
75 | }; | ||
76 | |||
71 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ | 77 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ |
72 | struct iommu_group_attribute iommu_group_attr_##_name = \ | 78 | struct iommu_group_attribute iommu_group_attr_##_name = \ |
73 | __ATTR(_name, _mode, _show, _store) | 79 | __ATTR(_name, _mode, _show, _store) |
@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ | |||
77 | #define to_iommu_group(_kobj) \ | 83 | #define to_iommu_group(_kobj) \ |
78 | container_of(_kobj, struct iommu_group, kobj) | 84 | container_of(_kobj, struct iommu_group, kobj) |
79 | 85 | ||
86 | static LIST_HEAD(iommu_device_list); | ||
87 | static DEFINE_SPINLOCK(iommu_device_lock); | ||
88 | |||
89 | int iommu_device_register(struct iommu_device *iommu) | ||
90 | { | ||
91 | spin_lock(&iommu_device_lock); | ||
92 | list_add_tail(&iommu->list, &iommu_device_list); | ||
93 | spin_unlock(&iommu_device_lock); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | void iommu_device_unregister(struct iommu_device *iommu) | ||
99 | { | ||
100 | spin_lock(&iommu_device_lock); | ||
101 | list_del(&iommu->list); | ||
102 | spin_unlock(&iommu_device_lock); | ||
103 | } | ||
104 | |||
80 | static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, | 105 | static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, |
81 | unsigned type); | 106 | unsigned type); |
82 | static int __iommu_attach_device(struct iommu_domain *domain, | 107 | static int __iommu_attach_device(struct iommu_domain *domain, |
@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) | |||
133 | return sprintf(buf, "%s\n", group->name); | 158 | return sprintf(buf, "%s\n", group->name); |
134 | } | 159 | } |
135 | 160 | ||
161 | /** | ||
162 | * iommu_insert_resv_region - Insert a new region in the | ||
163 | * list of reserved regions. | ||
164 | * @new: new region to insert | ||
165 | * @regions: list of regions | ||
166 | * | ||
167 | * The new element is sorted by address with respect to the other | ||
168 | * regions of the same type. In case it overlaps with another | ||
169 | * region of the same type, regions are merged. In case it | ||
170 | * overlaps with another region of different type, regions are | ||
171 | * not merged. | ||
172 | */ | ||
173 | static int iommu_insert_resv_region(struct iommu_resv_region *new, | ||
174 | struct list_head *regions) | ||
175 | { | ||
176 | struct iommu_resv_region *region; | ||
177 | phys_addr_t start = new->start; | ||
178 | phys_addr_t end = new->start + new->length - 1; | ||
179 | struct list_head *pos = regions->next; | ||
180 | |||
181 | while (pos != regions) { | ||
182 | struct iommu_resv_region *entry = | ||
183 | list_entry(pos, struct iommu_resv_region, list); | ||
184 | phys_addr_t a = entry->start; | ||
185 | phys_addr_t b = entry->start + entry->length - 1; | ||
186 | int type = entry->type; | ||
187 | |||
188 | if (end < a) { | ||
189 | goto insert; | ||
190 | } else if (start > b) { | ||
191 | pos = pos->next; | ||
192 | } else if ((start >= a) && (end <= b)) { | ||
193 | if (new->type == type) | ||
194 | goto done; | ||
195 | else | ||
196 | pos = pos->next; | ||
197 | } else { | ||
198 | if (new->type == type) { | ||
199 | phys_addr_t new_start = min(a, start); | ||
200 | phys_addr_t new_end = max(b, end); | ||
201 | |||
202 | list_del(&entry->list); | ||
203 | entry->start = new_start; | ||
204 | entry->length = new_end - new_start + 1; | ||
205 | iommu_insert_resv_region(entry, regions); | ||
206 | } else { | ||
207 | pos = pos->next; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | insert: | ||
212 | region = iommu_alloc_resv_region(new->start, new->length, | ||
213 | new->prot, new->type); | ||
214 | if (!region) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | list_add_tail(®ion->list, pos); | ||
218 | done: | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static int | ||
223 | iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, | ||
224 | struct list_head *group_resv_regions) | ||
225 | { | ||
226 | struct iommu_resv_region *entry; | ||
227 | int ret = 0; | ||
228 | |||
229 | list_for_each_entry(entry, dev_resv_regions, list) { | ||
230 | ret = iommu_insert_resv_region(entry, group_resv_regions); | ||
231 | if (ret) | ||
232 | break; | ||
233 | } | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | int iommu_get_group_resv_regions(struct iommu_group *group, | ||
238 | struct list_head *head) | ||
239 | { | ||
240 | struct group_device *device; | ||
241 | int ret = 0; | ||
242 | |||
243 | mutex_lock(&group->mutex); | ||
244 | list_for_each_entry(device, &group->devices, list) { | ||
245 | struct list_head dev_resv_regions; | ||
246 | |||
247 | INIT_LIST_HEAD(&dev_resv_regions); | ||
248 | iommu_get_resv_regions(device->dev, &dev_resv_regions); | ||
249 | ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); | ||
250 | iommu_put_resv_regions(device->dev, &dev_resv_regions); | ||
251 | if (ret) | ||
252 | break; | ||
253 | } | ||
254 | mutex_unlock(&group->mutex); | ||
255 | return ret; | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); | ||
258 | |||
259 | static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, | ||
260 | char *buf) | ||
261 | { | ||
262 | struct iommu_resv_region *region, *next; | ||
263 | struct list_head group_resv_regions; | ||
264 | char *str = buf; | ||
265 | |||
266 | INIT_LIST_HEAD(&group_resv_regions); | ||
267 | iommu_get_group_resv_regions(group, &group_resv_regions); | ||
268 | |||
269 | list_for_each_entry_safe(region, next, &group_resv_regions, list) { | ||
270 | str += sprintf(str, "0x%016llx 0x%016llx %s\n", | ||
271 | (long long int)region->start, | ||
272 | (long long int)(region->start + | ||
273 | region->length - 1), | ||
274 | iommu_group_resv_type_string[region->type]); | ||
275 | kfree(region); | ||
276 | } | ||
277 | |||
278 | return (str - buf); | ||
279 | } | ||
280 | |||
136 | static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); | 281 | static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); |
137 | 282 | ||
283 | static IOMMU_GROUP_ATTR(reserved_regions, 0444, | ||
284 | iommu_group_show_resv_regions, NULL); | ||
285 | |||
138 | static void iommu_group_release(struct kobject *kobj) | 286 | static void iommu_group_release(struct kobject *kobj) |
139 | { | 287 | { |
140 | struct iommu_group *group = to_iommu_group(kobj); | 288 | struct iommu_group *group = to_iommu_group(kobj); |
@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void) | |||
212 | */ | 360 | */ |
213 | kobject_put(&group->kobj); | 361 | kobject_put(&group->kobj); |
214 | 362 | ||
363 | ret = iommu_group_create_file(group, | ||
364 | &iommu_group_attr_reserved_regions); | ||
365 | if (ret) | ||
366 | return ERR_PTR(ret); | ||
367 | |||
215 | pr_debug("Allocated group %d\n", group->id); | 368 | pr_debug("Allocated group %d\n", group->id); |
216 | 369 | ||
217 | return group; | 370 | return group; |
@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, | |||
318 | struct device *dev) | 471 | struct device *dev) |
319 | { | 472 | { |
320 | struct iommu_domain *domain = group->default_domain; | 473 | struct iommu_domain *domain = group->default_domain; |
321 | struct iommu_dm_region *entry; | 474 | struct iommu_resv_region *entry; |
322 | struct list_head mappings; | 475 | struct list_head mappings; |
323 | unsigned long pg_size; | 476 | unsigned long pg_size; |
324 | int ret = 0; | 477 | int ret = 0; |
@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, | |||
331 | pg_size = 1UL << __ffs(domain->pgsize_bitmap); | 484 | pg_size = 1UL << __ffs(domain->pgsize_bitmap); |
332 | INIT_LIST_HEAD(&mappings); | 485 | INIT_LIST_HEAD(&mappings); |
333 | 486 | ||
334 | iommu_get_dm_regions(dev, &mappings); | 487 | iommu_get_resv_regions(dev, &mappings); |
335 | 488 | ||
336 | /* We need to consider overlapping regions for different devices */ | 489 | /* We need to consider overlapping regions for different devices */ |
337 | list_for_each_entry(entry, &mappings, list) { | 490 | list_for_each_entry(entry, &mappings, list) { |
338 | dma_addr_t start, end, addr; | 491 | dma_addr_t start, end, addr; |
339 | 492 | ||
340 | if (domain->ops->apply_dm_region) | 493 | if (domain->ops->apply_resv_region) |
341 | domain->ops->apply_dm_region(dev, domain, entry); | 494 | domain->ops->apply_resv_region(dev, domain, entry); |
342 | 495 | ||
343 | start = ALIGN(entry->start, pg_size); | 496 | start = ALIGN(entry->start, pg_size); |
344 | end = ALIGN(entry->start + entry->length, pg_size); | 497 | end = ALIGN(entry->start + entry->length, pg_size); |
345 | 498 | ||
499 | if (entry->type != IOMMU_RESV_DIRECT) | ||
500 | continue; | ||
501 | |||
346 | for (addr = start; addr < end; addr += pg_size) { | 502 | for (addr = start; addr < end; addr += pg_size) { |
347 | phys_addr_t phys_addr; | 503 | phys_addr_t phys_addr; |
348 | 504 | ||
@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, | |||
358 | } | 514 | } |
359 | 515 | ||
360 | out: | 516 | out: |
361 | iommu_put_dm_regions(dev, &mappings); | 517 | iommu_put_resv_regions(dev, &mappings); |
362 | 518 | ||
363 | return ret; | 519 | return ret; |
364 | } | 520 | } |
@@ -374,7 +530,7 @@ out: | |||
374 | int iommu_group_add_device(struct iommu_group *group, struct device *dev) | 530 | int iommu_group_add_device(struct iommu_group *group, struct device *dev) |
375 | { | 531 | { |
376 | int ret, i = 0; | 532 | int ret, i = 0; |
377 | struct iommu_device *device; | 533 | struct group_device *device; |
378 | 534 | ||
379 | device = kzalloc(sizeof(*device), GFP_KERNEL); | 535 | device = kzalloc(sizeof(*device), GFP_KERNEL); |
380 | if (!device) | 536 | if (!device) |
@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) | |||
383 | device->dev = dev; | 539 | device->dev = dev; |
384 | 540 | ||
385 | ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); | 541 | ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); |
386 | if (ret) { | 542 | if (ret) |
387 | kfree(device); | 543 | goto err_free_device; |
388 | return ret; | ||
389 | } | ||
390 | 544 | ||
391 | device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); | 545 | device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); |
392 | rename: | 546 | rename: |
393 | if (!device->name) { | 547 | if (!device->name) { |
394 | sysfs_remove_link(&dev->kobj, "iommu_group"); | 548 | ret = -ENOMEM; |
395 | kfree(device); | 549 | goto err_remove_link; |
396 | return -ENOMEM; | ||
397 | } | 550 | } |
398 | 551 | ||
399 | ret = sysfs_create_link_nowarn(group->devices_kobj, | 552 | ret = sysfs_create_link_nowarn(group->devices_kobj, |
400 | &dev->kobj, device->name); | 553 | &dev->kobj, device->name); |
401 | if (ret) { | 554 | if (ret) { |
402 | kfree(device->name); | ||
403 | if (ret == -EEXIST && i >= 0) { | 555 | if (ret == -EEXIST && i >= 0) { |
404 | /* | 556 | /* |
405 | * Account for the slim chance of collision | 557 | * Account for the slim chance of collision |
406 | * and append an instance to the name. | 558 | * and append an instance to the name. |
407 | */ | 559 | */ |
560 | kfree(device->name); | ||
408 | device->name = kasprintf(GFP_KERNEL, "%s.%d", | 561 | device->name = kasprintf(GFP_KERNEL, "%s.%d", |
409 | kobject_name(&dev->kobj), i++); | 562 | kobject_name(&dev->kobj), i++); |
410 | goto rename; | 563 | goto rename; |
411 | } | 564 | } |
412 | 565 | goto err_free_name; | |
413 | sysfs_remove_link(&dev->kobj, "iommu_group"); | ||
414 | kfree(device); | ||
415 | return ret; | ||
416 | } | 566 | } |
417 | 567 | ||
418 | kobject_get(group->devices_kobj); | 568 | kobject_get(group->devices_kobj); |
@@ -424,8 +574,10 @@ rename: | |||
424 | mutex_lock(&group->mutex); | 574 | mutex_lock(&group->mutex); |
425 | list_add_tail(&device->list, &group->devices); | 575 | list_add_tail(&device->list, &group->devices); |
426 | if (group->domain) | 576 | if (group->domain) |
427 | __iommu_attach_device(group->domain, dev); | 577 | ret = __iommu_attach_device(group->domain, dev); |
428 | mutex_unlock(&group->mutex); | 578 | mutex_unlock(&group->mutex); |
579 | if (ret) | ||
580 | goto err_put_group; | ||
429 | 581 | ||
430 | /* Notify any listeners about change to group. */ | 582 | /* Notify any listeners about change to group. */ |
431 | blocking_notifier_call_chain(&group->notifier, | 583 | blocking_notifier_call_chain(&group->notifier, |
@@ -436,6 +588,21 @@ rename: | |||
436 | pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); | 588 | pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); |
437 | 589 | ||
438 | return 0; | 590 | return 0; |
591 | |||
592 | err_put_group: | ||
593 | mutex_lock(&group->mutex); | ||
594 | list_del(&device->list); | ||
595 | mutex_unlock(&group->mutex); | ||
596 | dev->iommu_group = NULL; | ||
597 | kobject_put(group->devices_kobj); | ||
598 | err_free_name: | ||
599 | kfree(device->name); | ||
600 | err_remove_link: | ||
601 | sysfs_remove_link(&dev->kobj, "iommu_group"); | ||
602 | err_free_device: | ||
603 | kfree(device); | ||
604 | pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); | ||
605 | return ret; | ||
439 | } | 606 | } |
440 | EXPORT_SYMBOL_GPL(iommu_group_add_device); | 607 | EXPORT_SYMBOL_GPL(iommu_group_add_device); |
441 | 608 | ||
@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device); | |||
449 | void iommu_group_remove_device(struct device *dev) | 616 | void iommu_group_remove_device(struct device *dev) |
450 | { | 617 | { |
451 | struct iommu_group *group = dev->iommu_group; | 618 | struct iommu_group *group = dev->iommu_group; |
452 | struct iommu_device *tmp_device, *device = NULL; | 619 | struct group_device *tmp_device, *device = NULL; |
453 | 620 | ||
454 | pr_info("Removing device %s from group %d\n", dev_name(dev), group->id); | 621 | pr_info("Removing device %s from group %d\n", dev_name(dev), group->id); |
455 | 622 | ||
@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device); | |||
484 | 651 | ||
485 | static int iommu_group_device_count(struct iommu_group *group) | 652 | static int iommu_group_device_count(struct iommu_group *group) |
486 | { | 653 | { |
487 | struct iommu_device *entry; | 654 | struct group_device *entry; |
488 | int ret = 0; | 655 | int ret = 0; |
489 | 656 | ||
490 | list_for_each_entry(entry, &group->devices, list) | 657 | list_for_each_entry(entry, &group->devices, list) |
@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group) | |||
507 | static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, | 674 | static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, |
508 | int (*fn)(struct device *, void *)) | 675 | int (*fn)(struct device *, void *)) |
509 | { | 676 | { |
510 | struct iommu_device *device; | 677 | struct group_device *device; |
511 | int ret = 0; | 678 | int ret = 0; |
512 | 679 | ||
513 | list_for_each_entry(device, &group->devices, list) { | 680 | list_for_each_entry(device, &group->devices, list) { |
@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain, | |||
1559 | } | 1726 | } |
1560 | EXPORT_SYMBOL_GPL(iommu_domain_set_attr); | 1727 | EXPORT_SYMBOL_GPL(iommu_domain_set_attr); |
1561 | 1728 | ||
1562 | void iommu_get_dm_regions(struct device *dev, struct list_head *list) | 1729 | void iommu_get_resv_regions(struct device *dev, struct list_head *list) |
1563 | { | 1730 | { |
1564 | const struct iommu_ops *ops = dev->bus->iommu_ops; | 1731 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
1565 | 1732 | ||
1566 | if (ops && ops->get_dm_regions) | 1733 | if (ops && ops->get_resv_regions) |
1567 | ops->get_dm_regions(dev, list); | 1734 | ops->get_resv_regions(dev, list); |
1568 | } | 1735 | } |
1569 | 1736 | ||
1570 | void iommu_put_dm_regions(struct device *dev, struct list_head *list) | 1737 | void iommu_put_resv_regions(struct device *dev, struct list_head *list) |
1571 | { | 1738 | { |
1572 | const struct iommu_ops *ops = dev->bus->iommu_ops; | 1739 | const struct iommu_ops *ops = dev->bus->iommu_ops; |
1573 | 1740 | ||
1574 | if (ops && ops->put_dm_regions) | 1741 | if (ops && ops->put_resv_regions) |
1575 | ops->put_dm_regions(dev, list); | 1742 | ops->put_resv_regions(dev, list); |
1743 | } | ||
1744 | |||
1745 | struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, | ||
1746 | size_t length, | ||
1747 | int prot, int type) | ||
1748 | { | ||
1749 | struct iommu_resv_region *region; | ||
1750 | |||
1751 | region = kzalloc(sizeof(*region), GFP_KERNEL); | ||
1752 | if (!region) | ||
1753 | return NULL; | ||
1754 | |||
1755 | INIT_LIST_HEAD(®ion->list); | ||
1756 | region->start = start; | ||
1757 | region->length = length; | ||
1758 | region->prot = prot; | ||
1759 | region->type = type; | ||
1760 | return region; | ||
1576 | } | 1761 | } |
1577 | 1762 | ||
1578 | /* Request that a device is direct mapped by the IOMMU */ | 1763 | /* Request that a device is direct mapped by the IOMMU */ |
@@ -1628,43 +1813,18 @@ out: | |||
1628 | return ret; | 1813 | return ret; |
1629 | } | 1814 | } |
1630 | 1815 | ||
1631 | struct iommu_instance { | 1816 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) |
1632 | struct list_head list; | ||
1633 | struct fwnode_handle *fwnode; | ||
1634 | const struct iommu_ops *ops; | ||
1635 | }; | ||
1636 | static LIST_HEAD(iommu_instance_list); | ||
1637 | static DEFINE_SPINLOCK(iommu_instance_lock); | ||
1638 | |||
1639 | void iommu_register_instance(struct fwnode_handle *fwnode, | ||
1640 | const struct iommu_ops *ops) | ||
1641 | { | 1817 | { |
1642 | struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | ||
1643 | |||
1644 | if (WARN_ON(!iommu)) | ||
1645 | return; | ||
1646 | |||
1647 | of_node_get(to_of_node(fwnode)); | ||
1648 | INIT_LIST_HEAD(&iommu->list); | ||
1649 | iommu->fwnode = fwnode; | ||
1650 | iommu->ops = ops; | ||
1651 | spin_lock(&iommu_instance_lock); | ||
1652 | list_add_tail(&iommu->list, &iommu_instance_list); | ||
1653 | spin_unlock(&iommu_instance_lock); | ||
1654 | } | ||
1655 | |||
1656 | const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) | ||
1657 | { | ||
1658 | struct iommu_instance *instance; | ||
1659 | const struct iommu_ops *ops = NULL; | 1818 | const struct iommu_ops *ops = NULL; |
1819 | struct iommu_device *iommu; | ||
1660 | 1820 | ||
1661 | spin_lock(&iommu_instance_lock); | 1821 | spin_lock(&iommu_device_lock); |
1662 | list_for_each_entry(instance, &iommu_instance_list, list) | 1822 | list_for_each_entry(iommu, &iommu_device_list, list) |
1663 | if (instance->fwnode == fwnode) { | 1823 | if (iommu->fwnode == fwnode) { |
1664 | ops = instance->ops; | 1824 | ops = iommu->ops; |
1665 | break; | 1825 | break; |
1666 | } | 1826 | } |
1667 | spin_unlock(&iommu_instance_lock); | 1827 | spin_unlock(&iommu_device_lock); |
1668 | return ops; | 1828 | return ops; |
1669 | } | 1829 | } |
1670 | 1830 | ||
@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) | |||
1714 | fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); | 1874 | fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); |
1715 | if (!fwspec) | 1875 | if (!fwspec) |
1716 | return -ENOMEM; | 1876 | return -ENOMEM; |
1877 | |||
1878 | dev->iommu_fwspec = fwspec; | ||
1717 | } | 1879 | } |
1718 | 1880 | ||
1719 | for (i = 0; i < num_ids; i++) | 1881 | for (i = 0; i < num_ids; i++) |
1720 | fwspec->ids[fwspec->num_ids + i] = ids[i]; | 1882 | fwspec->ids[fwspec->num_ids + i] = ids[i]; |
1721 | 1883 | ||
1722 | fwspec->num_ids += num_ids; | 1884 | fwspec->num_ids += num_ids; |
1723 | dev->iommu_fwspec = fwspec; | ||
1724 | return 0; | 1885 | return 0; |
1725 | } | 1886 | } |
1726 | EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); | 1887 | EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 080beca0197d..b7268a14184f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | |||
62 | else { | 62 | else { |
63 | struct rb_node *prev_node = rb_prev(iovad->cached32_node); | 63 | struct rb_node *prev_node = rb_prev(iovad->cached32_node); |
64 | struct iova *curr_iova = | 64 | struct iova *curr_iova = |
65 | container_of(iovad->cached32_node, struct iova, node); | 65 | rb_entry(iovad->cached32_node, struct iova, node); |
66 | *limit_pfn = curr_iova->pfn_lo - 1; | 66 | *limit_pfn = curr_iova->pfn_lo - 1; |
67 | return prev_node; | 67 | return prev_node; |
68 | } | 68 | } |
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
86 | if (!iovad->cached32_node) | 86 | if (!iovad->cached32_node) |
87 | return; | 87 | return; |
88 | curr = iovad->cached32_node; | 88 | curr = iovad->cached32_node; |
89 | cached_iova = container_of(curr, struct iova, node); | 89 | cached_iova = rb_entry(curr, struct iova, node); |
90 | 90 | ||
91 | if (free->pfn_lo >= cached_iova->pfn_lo) { | 91 | if (free->pfn_lo >= cached_iova->pfn_lo) { |
92 | struct rb_node *node = rb_next(&free->node); | 92 | struct rb_node *node = rb_next(&free->node); |
93 | struct iova *iova = container_of(node, struct iova, node); | 93 | struct iova *iova = rb_entry(node, struct iova, node); |
94 | 94 | ||
95 | /* only cache if it's below 32bit pfn */ | 95 | /* only cache if it's below 32bit pfn */ |
96 | if (node && iova->pfn_lo < iovad->dma_32bit_pfn) | 96 | if (node && iova->pfn_lo < iovad->dma_32bit_pfn) |
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, | |||
125 | curr = __get_cached_rbnode(iovad, &limit_pfn); | 125 | curr = __get_cached_rbnode(iovad, &limit_pfn); |
126 | prev = curr; | 126 | prev = curr; |
127 | while (curr) { | 127 | while (curr) { |
128 | struct iova *curr_iova = container_of(curr, struct iova, node); | 128 | struct iova *curr_iova = rb_entry(curr, struct iova, node); |
129 | 129 | ||
130 | if (limit_pfn < curr_iova->pfn_lo) | 130 | if (limit_pfn < curr_iova->pfn_lo) |
131 | goto move_left; | 131 | goto move_left; |
@@ -171,8 +171,7 @@ move_left: | |||
171 | 171 | ||
172 | /* Figure out where to put new node */ | 172 | /* Figure out where to put new node */ |
173 | while (*entry) { | 173 | while (*entry) { |
174 | struct iova *this = container_of(*entry, | 174 | struct iova *this = rb_entry(*entry, struct iova, node); |
175 | struct iova, node); | ||
176 | parent = *entry; | 175 | parent = *entry; |
177 | 176 | ||
178 | if (new->pfn_lo < this->pfn_lo) | 177 | if (new->pfn_lo < this->pfn_lo) |
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
201 | struct rb_node **new = &(root->rb_node), *parent = NULL; | 200 | struct rb_node **new = &(root->rb_node), *parent = NULL; |
202 | /* Figure out where to put new node */ | 201 | /* Figure out where to put new node */ |
203 | while (*new) { | 202 | while (*new) { |
204 | struct iova *this = container_of(*new, struct iova, node); | 203 | struct iova *this = rb_entry(*new, struct iova, node); |
205 | 204 | ||
206 | parent = *new; | 205 | parent = *new; |
207 | 206 | ||
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn) | |||
311 | assert_spin_locked(&iovad->iova_rbtree_lock); | 310 | assert_spin_locked(&iovad->iova_rbtree_lock); |
312 | 311 | ||
313 | while (node) { | 312 | while (node) { |
314 | struct iova *iova = container_of(node, struct iova, node); | 313 | struct iova *iova = rb_entry(node, struct iova, node); |
315 | 314 | ||
316 | /* If pfn falls within iova's range, return iova */ | 315 | /* If pfn falls within iova's range, return iova */ |
317 | if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { | 316 | if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { |
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad) | |||
463 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | 462 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
464 | node = rb_first(&iovad->rbroot); | 463 | node = rb_first(&iovad->rbroot); |
465 | while (node) { | 464 | while (node) { |
466 | struct iova *iova = container_of(node, struct iova, node); | 465 | struct iova *iova = rb_entry(node, struct iova, node); |
467 | 466 | ||
468 | rb_erase(node, &iovad->rbroot); | 467 | rb_erase(node, &iovad->rbroot); |
469 | free_iova_mem(iova); | 468 | free_iova_mem(iova); |
@@ -477,7 +476,7 @@ static int | |||
477 | __is_range_overlap(struct rb_node *node, | 476 | __is_range_overlap(struct rb_node *node, |
478 | unsigned long pfn_lo, unsigned long pfn_hi) | 477 | unsigned long pfn_lo, unsigned long pfn_hi) |
479 | { | 478 | { |
480 | struct iova *iova = container_of(node, struct iova, node); | 479 | struct iova *iova = rb_entry(node, struct iova, node); |
481 | 480 | ||
482 | if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) | 481 | if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) |
483 | return 1; | 482 | return 1; |
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad, | |||
541 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | 540 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
542 | for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { | 541 | for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { |
543 | if (__is_range_overlap(node, pfn_lo, pfn_hi)) { | 542 | if (__is_range_overlap(node, pfn_lo, pfn_hi)) { |
544 | iova = container_of(node, struct iova, node); | 543 | iova = rb_entry(node, struct iova, node); |
545 | __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); | 544 | __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); |
546 | if ((pfn_lo >= iova->pfn_lo) && | 545 | if ((pfn_lo >= iova->pfn_lo) && |
547 | (pfn_hi <= iova->pfn_hi)) | 546 | (pfn_hi <= iova->pfn_hi)) |
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |||
578 | 577 | ||
579 | spin_lock_irqsave(&from->iova_rbtree_lock, flags); | 578 | spin_lock_irqsave(&from->iova_rbtree_lock, flags); |
580 | for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { | 579 | for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { |
581 | struct iova *iova = container_of(node, struct iova, node); | 580 | struct iova *iova = rb_entry(node, struct iova, node); |
582 | struct iova *new_iova; | 581 | struct iova *new_iova; |
583 | 582 | ||
584 | new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); | 583 | new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); |
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ace331da6459..b7e14ee863f9 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | |||
313 | domain->cfg.ias = 32; | 313 | domain->cfg.ias = 32; |
314 | domain->cfg.oas = 40; | 314 | domain->cfg.oas = 40; |
315 | domain->cfg.tlb = &ipmmu_gather_ops; | 315 | domain->cfg.tlb = &ipmmu_gather_ops; |
316 | domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); | ||
317 | domain->io_domain.geometry.force_aperture = true; | ||
316 | /* | 318 | /* |
317 | * TODO: Add support for coherent walk through CCI with DVM and remove | 319 | * TODO: Add support for coherent walk through CCI with DVM and remove |
318 | * cache handling. For now, delegate it to the io-pgtable code. | 320 | * cache handling. For now, delegate it to the io-pgtable code. |
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b09692bb5b0a..d0448353d501 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c | |||
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv) | |||
371 | return 0; | 371 | return 0; |
372 | } | 372 | } |
373 | 373 | ||
374 | /* Must be called under msm_iommu_lock */ | ||
375 | static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) | ||
376 | { | ||
377 | struct msm_iommu_dev *iommu, *ret = NULL; | ||
378 | struct msm_iommu_ctx_dev *master; | ||
379 | |||
380 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { | ||
381 | master = list_first_entry(&iommu->ctx_list, | ||
382 | struct msm_iommu_ctx_dev, | ||
383 | list); | ||
384 | if (master->of_node == dev->of_node) { | ||
385 | ret = iommu; | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | |||
390 | return ret; | ||
391 | } | ||
392 | |||
393 | static int msm_iommu_add_device(struct device *dev) | ||
394 | { | ||
395 | struct msm_iommu_dev *iommu; | ||
396 | unsigned long flags; | ||
397 | int ret = 0; | ||
398 | |||
399 | spin_lock_irqsave(&msm_iommu_lock, flags); | ||
400 | |||
401 | iommu = find_iommu_for_dev(dev); | ||
402 | if (iommu) | ||
403 | iommu_device_link(&iommu->iommu, dev); | ||
404 | else | ||
405 | ret = -ENODEV; | ||
406 | |||
407 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | ||
408 | |||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | static void msm_iommu_remove_device(struct device *dev) | ||
413 | { | ||
414 | struct msm_iommu_dev *iommu; | ||
415 | unsigned long flags; | ||
416 | |||
417 | spin_lock_irqsave(&msm_iommu_lock, flags); | ||
418 | |||
419 | iommu = find_iommu_for_dev(dev); | ||
420 | if (iommu) | ||
421 | iommu_device_unlink(&iommu->iommu, dev); | ||
422 | |||
423 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | ||
424 | } | ||
425 | |||
374 | static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | 426 | static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
375 | { | 427 | { |
376 | int ret = 0; | 428 | int ret = 0; |
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = { | |||
646 | .unmap = msm_iommu_unmap, | 698 | .unmap = msm_iommu_unmap, |
647 | .map_sg = default_iommu_map_sg, | 699 | .map_sg = default_iommu_map_sg, |
648 | .iova_to_phys = msm_iommu_iova_to_phys, | 700 | .iova_to_phys = msm_iommu_iova_to_phys, |
701 | .add_device = msm_iommu_add_device, | ||
702 | .remove_device = msm_iommu_remove_device, | ||
649 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, | 703 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, |
650 | .of_xlate = qcom_iommu_of_xlate, | 704 | .of_xlate = qcom_iommu_of_xlate, |
651 | }; | 705 | }; |
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = { | |||
653 | static int msm_iommu_probe(struct platform_device *pdev) | 707 | static int msm_iommu_probe(struct platform_device *pdev) |
654 | { | 708 | { |
655 | struct resource *r; | 709 | struct resource *r; |
710 | resource_size_t ioaddr; | ||
656 | struct msm_iommu_dev *iommu; | 711 | struct msm_iommu_dev *iommu; |
657 | int ret, par, val; | 712 | int ret, par, val; |
658 | 713 | ||
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev) | |||
696 | ret = PTR_ERR(iommu->base); | 751 | ret = PTR_ERR(iommu->base); |
697 | goto fail; | 752 | goto fail; |
698 | } | 753 | } |
754 | ioaddr = r->start; | ||
699 | 755 | ||
700 | iommu->irq = platform_get_irq(pdev, 0); | 756 | iommu->irq = platform_get_irq(pdev, 0); |
701 | if (iommu->irq < 0) { | 757 | if (iommu->irq < 0) { |
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev) | |||
737 | } | 793 | } |
738 | 794 | ||
739 | list_add(&iommu->dev_node, &qcom_iommu_devices); | 795 | list_add(&iommu->dev_node, &qcom_iommu_devices); |
740 | of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops); | 796 | |
797 | ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL, | ||
798 | "msm-smmu.%pa", &ioaddr); | ||
799 | if (ret) { | ||
800 | pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr); | ||
801 | goto fail; | ||
802 | } | ||
803 | |||
804 | iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops); | ||
805 | iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); | ||
806 | |||
807 | ret = iommu_device_register(&iommu->iommu); | ||
808 | if (ret) { | ||
809 | pr_err("Could not register msm-smmu at %pa\n", &ioaddr); | ||
810 | goto fail; | ||
811 | } | ||
741 | 812 | ||
742 | pr_info("device mapped at %p, irq %d with %d ctx banks\n", | 813 | pr_info("device mapped at %p, irq %d with %d ctx banks\n", |
743 | iommu->base, iommu->irq, iommu->ncb); | 814 | iommu->base, iommu->irq, iommu->ncb); |
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h index 4ca25d50d679..ae92d2779c42 100644 --- a/drivers/iommu/msm_iommu.h +++ b/drivers/iommu/msm_iommu.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #define MSM_IOMMU_H | 19 | #define MSM_IOMMU_H |
20 | 20 | ||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/iommu.h> | ||
22 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
23 | 24 | ||
24 | /* Sharability attributes of MSM IOMMU mappings */ | 25 | /* Sharability attributes of MSM IOMMU mappings */ |
@@ -68,6 +69,8 @@ struct msm_iommu_dev { | |||
68 | struct list_head dom_node; | 69 | struct list_head dom_node; |
69 | struct list_head ctx_list; | 70 | struct list_head ctx_list; |
70 | DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); | 71 | DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); |
72 | |||
73 | struct iommu_device iommu; | ||
71 | }; | 74 | }; |
72 | 75 | ||
73 | /** | 76 | /** |
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 1479c76ece9e..5d14cd15198d 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c | |||
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, | |||
360 | 360 | ||
361 | static int mtk_iommu_add_device(struct device *dev) | 361 | static int mtk_iommu_add_device(struct device *dev) |
362 | { | 362 | { |
363 | struct mtk_iommu_data *data; | ||
363 | struct iommu_group *group; | 364 | struct iommu_group *group; |
364 | 365 | ||
365 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) | 366 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
366 | return -ENODEV; /* Not a iommu client device */ | 367 | return -ENODEV; /* Not a iommu client device */ |
367 | 368 | ||
369 | data = dev->iommu_fwspec->iommu_priv; | ||
370 | iommu_device_link(&data->iommu, dev); | ||
371 | |||
368 | group = iommu_group_get_for_dev(dev); | 372 | group = iommu_group_get_for_dev(dev); |
369 | if (IS_ERR(group)) | 373 | if (IS_ERR(group)) |
370 | return PTR_ERR(group); | 374 | return PTR_ERR(group); |
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev) | |||
375 | 379 | ||
376 | static void mtk_iommu_remove_device(struct device *dev) | 380 | static void mtk_iommu_remove_device(struct device *dev) |
377 | { | 381 | { |
382 | struct mtk_iommu_data *data; | ||
383 | |||
378 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) | 384 | if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) |
379 | return; | 385 | return; |
380 | 386 | ||
387 | data = dev->iommu_fwspec->iommu_priv; | ||
388 | iommu_device_unlink(&data->iommu, dev); | ||
389 | |||
381 | iommu_group_remove_device(dev); | 390 | iommu_group_remove_device(dev); |
382 | iommu_fwspec_free(dev); | 391 | iommu_fwspec_free(dev); |
383 | } | 392 | } |
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) | |||
497 | struct mtk_iommu_data *data; | 506 | struct mtk_iommu_data *data; |
498 | struct device *dev = &pdev->dev; | 507 | struct device *dev = &pdev->dev; |
499 | struct resource *res; | 508 | struct resource *res; |
509 | resource_size_t ioaddr; | ||
500 | struct component_match *match = NULL; | 510 | struct component_match *match = NULL; |
501 | void *protect; | 511 | void *protect; |
502 | int i, larb_nr, ret; | 512 | int i, larb_nr, ret; |
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) | |||
519 | data->base = devm_ioremap_resource(dev, res); | 529 | data->base = devm_ioremap_resource(dev, res); |
520 | if (IS_ERR(data->base)) | 530 | if (IS_ERR(data->base)) |
521 | return PTR_ERR(data->base); | 531 | return PTR_ERR(data->base); |
532 | ioaddr = res->start; | ||
522 | 533 | ||
523 | data->irq = platform_get_irq(pdev, 0); | 534 | data->irq = platform_get_irq(pdev, 0); |
524 | if (data->irq < 0) | 535 | if (data->irq < 0) |
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev) | |||
567 | if (ret) | 578 | if (ret) |
568 | return ret; | 579 | return ret; |
569 | 580 | ||
581 | ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, | ||
582 | "mtk-iommu.%pa", &ioaddr); | ||
583 | if (ret) | ||
584 | return ret; | ||
585 | |||
586 | iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); | ||
587 | iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode); | ||
588 | |||
589 | ret = iommu_device_register(&data->iommu); | ||
590 | if (ret) | ||
591 | return ret; | ||
592 | |||
570 | if (!iommu_present(&platform_bus_type)) | 593 | if (!iommu_present(&platform_bus_type)) |
571 | bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); | 594 | bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); |
572 | 595 | ||
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) | |||
577 | { | 600 | { |
578 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); | 601 | struct mtk_iommu_data *data = platform_get_drvdata(pdev); |
579 | 602 | ||
603 | iommu_device_sysfs_remove(&data->iommu); | ||
604 | iommu_device_unregister(&data->iommu); | ||
605 | |||
580 | if (iommu_present(&platform_bus_type)) | 606 | if (iommu_present(&platform_bus_type)) |
581 | bus_set_iommu(&platform_bus_type, NULL); | 607 | bus_set_iommu(&platform_bus_type, NULL); |
582 | 608 | ||
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np) | |||
655 | return ret; | 681 | return ret; |
656 | } | 682 | } |
657 | 683 | ||
658 | of_iommu_set_ops(np, &mtk_iommu_ops); | ||
659 | return 0; | 684 | return 0; |
660 | } | 685 | } |
661 | 686 | ||
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 50177f738e4e..2a28eadeea0e 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h | |||
@@ -47,6 +47,8 @@ struct mtk_iommu_data { | |||
47 | struct iommu_group *m4u_group; | 47 | struct iommu_group *m4u_group; |
48 | struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ | 48 | struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ |
49 | bool enable_4GB; | 49 | bool enable_4GB; |
50 | |||
51 | struct iommu_device iommu; | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | static inline int compare_of(struct device *dev, void *data) | 54 | static inline int compare_of(struct device *dev, void *data) |
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 0f57ddc4ecc2..2683e9fc0dcf 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
@@ -127,7 +127,7 @@ static const struct iommu_ops | |||
127 | "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) | 127 | "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) |
128 | return NULL; | 128 | return NULL; |
129 | 129 | ||
130 | ops = of_iommu_get_ops(iommu_spec.np); | 130 | ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); |
131 | if (!ops || !ops->of_xlate || | 131 | if (!ops || !ops->of_xlate || |
132 | iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || | 132 | iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || |
133 | ops->of_xlate(&pdev->dev, &iommu_spec)) | 133 | ops->of_xlate(&pdev->dev, &iommu_spec)) |
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
157 | "#iommu-cells", idx, | 157 | "#iommu-cells", idx, |
158 | &iommu_spec)) { | 158 | &iommu_spec)) { |
159 | np = iommu_spec.np; | 159 | np = iommu_spec.np; |
160 | ops = of_iommu_get_ops(np); | 160 | ops = iommu_ops_from_fwnode(&np->fwnode); |
161 | 161 | ||
162 | if (!ops || !ops->of_xlate || | 162 | if (!ops || !ops->of_xlate || |
163 | iommu_fwspec_init(dev, &np->fwnode, ops) || | 163 | iommu_fwspec_init(dev, &np->fwnode, ops) || |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 4a895c6d6805..23201004fd7a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1646,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) | |||
1646 | 1646 | ||
1647 | inner_domain->parent = its_parent; | 1647 | inner_domain->parent = its_parent; |
1648 | inner_domain->bus_token = DOMAIN_BUS_NEXUS; | 1648 | inner_domain->bus_token = DOMAIN_BUS_NEXUS; |
1649 | inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; | ||
1649 | info->ops = &its_msi_domain_ops; | 1650 | info->ops = &its_msi_domain_ops; |
1650 | info->data = its; | 1651 | info->data = its; |
1651 | inner_domain->host_data = info; | 1652 | inner_domain->host_data = info; |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index b3cc33fa6d26..bd6f293c4ebd 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
39 | #include <linux/mdev.h> | 39 | #include <linux/mdev.h> |
40 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
41 | #include <linux/dma-iommu.h> | ||
42 | #include <linux/irqdomain.h> | ||
41 | 43 | ||
42 | #define DRIVER_VERSION "0.2" | 44 | #define DRIVER_VERSION "0.2" |
43 | #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" | 45 | #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" |
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, | |||
1179 | return NULL; | 1181 | return NULL; |
1180 | } | 1182 | } |
1181 | 1183 | ||
1184 | static bool vfio_iommu_has_resv_msi(struct iommu_group *group, | ||
1185 | phys_addr_t *base) | ||
1186 | { | ||
1187 | struct list_head group_resv_regions; | ||
1188 | struct iommu_resv_region *region, *next; | ||
1189 | bool ret = false; | ||
1190 | |||
1191 | INIT_LIST_HEAD(&group_resv_regions); | ||
1192 | iommu_get_group_resv_regions(group, &group_resv_regions); | ||
1193 | list_for_each_entry(region, &group_resv_regions, list) { | ||
1194 | if (region->type & IOMMU_RESV_MSI) { | ||
1195 | *base = region->start; | ||
1196 | ret = true; | ||
1197 | goto out; | ||
1198 | } | ||
1199 | } | ||
1200 | out: | ||
1201 | list_for_each_entry_safe(region, next, &group_resv_regions, list) | ||
1202 | kfree(region); | ||
1203 | return ret; | ||
1204 | } | ||
1205 | |||
1182 | static int vfio_iommu_type1_attach_group(void *iommu_data, | 1206 | static int vfio_iommu_type1_attach_group(void *iommu_data, |
1183 | struct iommu_group *iommu_group) | 1207 | struct iommu_group *iommu_group) |
1184 | { | 1208 | { |
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, | |||
1187 | struct vfio_domain *domain, *d; | 1211 | struct vfio_domain *domain, *d; |
1188 | struct bus_type *bus = NULL, *mdev_bus; | 1212 | struct bus_type *bus = NULL, *mdev_bus; |
1189 | int ret; | 1213 | int ret; |
1214 | bool resv_msi, msi_remap; | ||
1215 | phys_addr_t resv_msi_base; | ||
1190 | 1216 | ||
1191 | mutex_lock(&iommu->lock); | 1217 | mutex_lock(&iommu->lock); |
1192 | 1218 | ||
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, | |||
1256 | if (ret) | 1282 | if (ret) |
1257 | goto out_domain; | 1283 | goto out_domain; |
1258 | 1284 | ||
1285 | resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); | ||
1286 | |||
1259 | INIT_LIST_HEAD(&domain->group_list); | 1287 | INIT_LIST_HEAD(&domain->group_list); |
1260 | list_add(&group->next, &domain->group_list); | 1288 | list_add(&group->next, &domain->group_list); |
1261 | 1289 | ||
1262 | if (!allow_unsafe_interrupts && | 1290 | msi_remap = resv_msi ? irq_domain_check_msi_remap() : |
1263 | !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) { | 1291 | iommu_capable(bus, IOMMU_CAP_INTR_REMAP); |
1292 | |||
1293 | if (!allow_unsafe_interrupts && !msi_remap) { | ||
1264 | pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", | 1294 | pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", |
1265 | __func__); | 1295 | __func__); |
1266 | ret = -EPERM; | 1296 | ret = -EPERM; |
@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, | |||
1302 | if (ret) | 1332 | if (ret) |
1303 | goto out_detach; | 1333 | goto out_detach; |
1304 | 1334 | ||
1335 | if (resv_msi) { | ||
1336 | ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); | ||
1337 | if (ret) | ||
1338 | goto out_detach; | ||
1339 | } | ||
1340 | |||
1305 | list_add(&domain->next, &iommu->domain_list); | 1341 | list_add(&domain->next, &iommu->domain_list); |
1306 | 1342 | ||
1307 | mutex_unlock(&iommu->lock); | 1343 | mutex_unlock(&iommu->lock); |
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 7f7e9a7e3839..5725c94b1f12 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
@@ -27,6 +27,7 @@ int iommu_dma_init(void); | |||
27 | 27 | ||
28 | /* Domain management interface for IOMMU drivers */ | 28 | /* Domain management interface for IOMMU drivers */ |
29 | int iommu_get_dma_cookie(struct iommu_domain *domain); | 29 | int iommu_get_dma_cookie(struct iommu_domain *domain); |
30 | int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); | ||
30 | void iommu_put_dma_cookie(struct iommu_domain *domain); | 31 | void iommu_put_dma_cookie(struct iommu_domain *domain); |
31 | 32 | ||
32 | /* Setup call for arch DMA mapping code */ | 33 | /* Setup call for arch DMA mapping code */ |
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | |||
34 | u64 size, struct device *dev); | 35 | u64 size, struct device *dev); |
35 | 36 | ||
36 | /* General helpers for DMA-API <-> IOMMU-API interaction */ | 37 | /* General helpers for DMA-API <-> IOMMU-API interaction */ |
37 | int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); | 38 | int dma_info_to_prot(enum dma_data_direction dir, bool coherent, |
39 | unsigned long attrs); | ||
38 | 40 | ||
39 | /* | 41 | /* |
40 | * These implement the bulk of the relevant DMA mapping callbacks, but require | 42 | * These implement the bulk of the relevant DMA mapping callbacks, but require |
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, | |||
65 | size_t size, enum dma_data_direction dir, unsigned long attrs); | 67 | size_t size, enum dma_data_direction dir, unsigned long attrs); |
66 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | 68 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
67 | size_t size, enum dma_data_direction dir, unsigned long attrs); | 69 | size_t size, enum dma_data_direction dir, unsigned long attrs); |
68 | int iommu_dma_supported(struct device *dev, u64 mask); | ||
69 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | 70 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
70 | 71 | ||
71 | /* The DMA API isn't _quite_ the whole story, though... */ | 72 | /* The DMA API isn't _quite_ the whole story, though... */ |
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain) | |||
86 | return -ENODEV; | 87 | return -ENODEV; |
87 | } | 88 | } |
88 | 89 | ||
90 | static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) | ||
91 | { | ||
92 | return -ENODEV; | ||
93 | } | ||
94 | |||
89 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) | 95 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) |
90 | { | 96 | { |
91 | } | 97 | } |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 10c5a17b1f51..c24721a33b4c 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -63,6 +63,13 @@ | |||
63 | #define DMA_ATTR_NO_WARN (1UL << 8) | 63 | #define DMA_ATTR_NO_WARN (1UL << 8) |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully | ||
67 | * accessible at an elevated privilege level (and ideally inaccessible or | ||
68 | * at least read-only at lesser-privileged levels). | ||
69 | */ | ||
70 | #define DMA_ATTR_PRIVILEGED (1UL << 9) | ||
71 | |||
72 | /* | ||
66 | * A dma_addr_t can hold any valid DMA or bus address for the platform. | 73 | * A dma_addr_t can hold any valid DMA or bus address for the platform. |
67 | * It can be given to a device to use as a DMA source or target. A CPU cannot | 74 | * It can be given to a device to use as a DMA source or target. A CPU cannot |
68 | * reference a dma_addr_t directly because there may be translation between | 75 | * reference a dma_addr_t directly because there may be translation between |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d49e26c6cdc7..c573a52ae440 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/dma_remapping.h> | 29 | #include <linux/dma_remapping.h> |
30 | #include <linux/mmu_notifier.h> | 30 | #include <linux/mmu_notifier.h> |
31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
32 | #include <linux/iommu.h> | ||
32 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
33 | #include <asm/iommu.h> | 34 | #include <asm/iommu.h> |
34 | 35 | ||
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
153 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) | 154 | #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) |
154 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) | 155 | #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) |
155 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) | 156 | #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) |
156 | #define DMA_TLB_IIRG(type) ((type >> 60) & 7) | 157 | #define DMA_TLB_IIRG(type) ((type >> 60) & 3) |
157 | #define DMA_TLB_IAIG(val) (((val) >> 57) & 7) | 158 | #define DMA_TLB_IAIG(val) (((val) >> 57) & 3) |
158 | #define DMA_TLB_READ_DRAIN (((u64)1) << 49) | 159 | #define DMA_TLB_READ_DRAIN (((u64)1) << 49) |
159 | #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) | 160 | #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) |
160 | #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) | 161 | #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) |
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
164 | 165 | ||
165 | /* INVALID_DESC */ | 166 | /* INVALID_DESC */ |
166 | #define DMA_CCMD_INVL_GRANU_OFFSET 61 | 167 | #define DMA_CCMD_INVL_GRANU_OFFSET 61 |
167 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) | 168 | #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) |
168 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) | 169 | #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) |
169 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) | 170 | #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) |
170 | #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) | 171 | #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) |
171 | #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) | 172 | #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) |
172 | #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) | 173 | #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) |
@@ -316,8 +317,8 @@ enum { | |||
316 | #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) | 317 | #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) |
317 | #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) | 318 | #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) |
318 | #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) | 319 | #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) |
319 | #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) | 320 | #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) |
320 | #define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) | 321 | #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) |
321 | #define QI_DEV_EIOTLB_MAX_INVS 32 | 322 | #define QI_DEV_EIOTLB_MAX_INVS 32 |
322 | 323 | ||
323 | #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) | 324 | #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) |
@@ -439,7 +440,7 @@ struct intel_iommu { | |||
439 | struct irq_domain *ir_domain; | 440 | struct irq_domain *ir_domain; |
440 | struct irq_domain *ir_msi_domain; | 441 | struct irq_domain *ir_msi_domain; |
441 | #endif | 442 | #endif |
442 | struct device *iommu_dev; /* IOMMU-sysfs device */ | 443 | struct iommu_device iommu; /* IOMMU core code handle */ |
443 | int node; | 444 | int node; |
444 | u32 flags; /* Software defined flags */ | 445 | u32 flags; /* Software defined flags */ |
445 | }; | 446 | }; |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0ff5111f6959..6a6de187ddc0 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -31,6 +31,13 @@ | |||
31 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ | 31 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ |
32 | #define IOMMU_NOEXEC (1 << 3) | 32 | #define IOMMU_NOEXEC (1 << 3) |
33 | #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ | 33 | #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ |
34 | /* | ||
35 | * This is to make the IOMMU API setup privileged | ||
36 | * mapppings accessible by the master only at higher | ||
37 | * privileged execution level and inaccessible at | ||
38 | * less privileged levels. | ||
39 | */ | ||
40 | #define IOMMU_PRIV (1 << 5) | ||
34 | 41 | ||
35 | struct iommu_ops; | 42 | struct iommu_ops; |
36 | struct iommu_group; | 43 | struct iommu_group; |
@@ -117,18 +124,25 @@ enum iommu_attr { | |||
117 | DOMAIN_ATTR_MAX, | 124 | DOMAIN_ATTR_MAX, |
118 | }; | 125 | }; |
119 | 126 | ||
127 | /* These are the possible reserved region types */ | ||
128 | #define IOMMU_RESV_DIRECT (1 << 0) | ||
129 | #define IOMMU_RESV_RESERVED (1 << 1) | ||
130 | #define IOMMU_RESV_MSI (1 << 2) | ||
131 | |||
120 | /** | 132 | /** |
121 | * struct iommu_dm_region - descriptor for a direct mapped memory region | 133 | * struct iommu_resv_region - descriptor for a reserved memory region |
122 | * @list: Linked list pointers | 134 | * @list: Linked list pointers |
123 | * @start: System physical start address of the region | 135 | * @start: System physical start address of the region |
124 | * @length: Length of the region in bytes | 136 | * @length: Length of the region in bytes |
125 | * @prot: IOMMU Protection flags (READ/WRITE/...) | 137 | * @prot: IOMMU Protection flags (READ/WRITE/...) |
138 | * @type: Type of the reserved region | ||
126 | */ | 139 | */ |
127 | struct iommu_dm_region { | 140 | struct iommu_resv_region { |
128 | struct list_head list; | 141 | struct list_head list; |
129 | phys_addr_t start; | 142 | phys_addr_t start; |
130 | size_t length; | 143 | size_t length; |
131 | int prot; | 144 | int prot; |
145 | int type; | ||
132 | }; | 146 | }; |
133 | 147 | ||
134 | #ifdef CONFIG_IOMMU_API | 148 | #ifdef CONFIG_IOMMU_API |
@@ -150,9 +164,9 @@ struct iommu_dm_region { | |||
150 | * @device_group: find iommu group for a particular device | 164 | * @device_group: find iommu group for a particular device |
151 | * @domain_get_attr: Query domain attributes | 165 | * @domain_get_attr: Query domain attributes |
152 | * @domain_set_attr: Change domain attributes | 166 | * @domain_set_attr: Change domain attributes |
153 | * @get_dm_regions: Request list of direct mapping requirements for a device | 167 | * @get_resv_regions: Request list of reserved regions for a device |
154 | * @put_dm_regions: Free list of direct mapping requirements for a device | 168 | * @put_resv_regions: Free list of reserved regions for a device |
155 | * @apply_dm_region: Temporary helper call-back for iova reserved ranges | 169 | * @apply_resv_region: Temporary helper call-back for iova reserved ranges |
156 | * @domain_window_enable: Configure and enable a particular window for a domain | 170 | * @domain_window_enable: Configure and enable a particular window for a domain |
157 | * @domain_window_disable: Disable a particular window for a domain | 171 | * @domain_window_disable: Disable a particular window for a domain |
158 | * @domain_set_windows: Set the number of windows for a domain | 172 | * @domain_set_windows: Set the number of windows for a domain |
@@ -184,11 +198,12 @@ struct iommu_ops { | |||
184 | int (*domain_set_attr)(struct iommu_domain *domain, | 198 | int (*domain_set_attr)(struct iommu_domain *domain, |
185 | enum iommu_attr attr, void *data); | 199 | enum iommu_attr attr, void *data); |
186 | 200 | ||
187 | /* Request/Free a list of direct mapping requirements for a device */ | 201 | /* Request/Free a list of reserved regions for a device */ |
188 | void (*get_dm_regions)(struct device *dev, struct list_head *list); | 202 | void (*get_resv_regions)(struct device *dev, struct list_head *list); |
189 | void (*put_dm_regions)(struct device *dev, struct list_head *list); | 203 | void (*put_resv_regions)(struct device *dev, struct list_head *list); |
190 | void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, | 204 | void (*apply_resv_region)(struct device *dev, |
191 | struct iommu_dm_region *region); | 205 | struct iommu_domain *domain, |
206 | struct iommu_resv_region *region); | ||
192 | 207 | ||
193 | /* Window handling functions */ | 208 | /* Window handling functions */ |
194 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, | 209 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, |
@@ -204,6 +219,42 @@ struct iommu_ops { | |||
204 | unsigned long pgsize_bitmap; | 219 | unsigned long pgsize_bitmap; |
205 | }; | 220 | }; |
206 | 221 | ||
222 | /** | ||
223 | * struct iommu_device - IOMMU core representation of one IOMMU hardware | ||
224 | * instance | ||
225 | * @list: Used by the iommu-core to keep a list of registered iommus | ||
226 | * @ops: iommu-ops for talking to this iommu | ||
227 | * @dev: struct device for sysfs handling | ||
228 | */ | ||
229 | struct iommu_device { | ||
230 | struct list_head list; | ||
231 | const struct iommu_ops *ops; | ||
232 | struct fwnode_handle *fwnode; | ||
233 | struct device dev; | ||
234 | }; | ||
235 | |||
236 | int iommu_device_register(struct iommu_device *iommu); | ||
237 | void iommu_device_unregister(struct iommu_device *iommu); | ||
238 | int iommu_device_sysfs_add(struct iommu_device *iommu, | ||
239 | struct device *parent, | ||
240 | const struct attribute_group **groups, | ||
241 | const char *fmt, ...) __printf(4, 5); | ||
242 | void iommu_device_sysfs_remove(struct iommu_device *iommu); | ||
243 | int iommu_device_link(struct iommu_device *iommu, struct device *link); | ||
244 | void iommu_device_unlink(struct iommu_device *iommu, struct device *link); | ||
245 | |||
246 | static inline void iommu_device_set_ops(struct iommu_device *iommu, | ||
247 | const struct iommu_ops *ops) | ||
248 | { | ||
249 | iommu->ops = ops; | ||
250 | } | ||
251 | |||
252 | static inline void iommu_device_set_fwnode(struct iommu_device *iommu, | ||
253 | struct fwnode_handle *fwnode) | ||
254 | { | ||
255 | iommu->fwnode = fwnode; | ||
256 | } | ||
257 | |||
207 | #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ | 258 | #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ |
208 | #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ | 259 | #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ |
209 | #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ | 260 | #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ |
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io | |||
233 | extern void iommu_set_fault_handler(struct iommu_domain *domain, | 284 | extern void iommu_set_fault_handler(struct iommu_domain *domain, |
234 | iommu_fault_handler_t handler, void *token); | 285 | iommu_fault_handler_t handler, void *token); |
235 | 286 | ||
236 | extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); | 287 | extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); |
237 | extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); | 288 | extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); |
238 | extern int iommu_request_dm_for_dev(struct device *dev); | 289 | extern int iommu_request_dm_for_dev(struct device *dev); |
290 | extern struct iommu_resv_region * | ||
291 | iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); | ||
292 | extern int iommu_get_group_resv_regions(struct iommu_group *group, | ||
293 | struct list_head *head); | ||
239 | 294 | ||
240 | extern int iommu_attach_group(struct iommu_domain *domain, | 295 | extern int iommu_attach_group(struct iommu_domain *domain, |
241 | struct iommu_group *group); | 296 | struct iommu_group *group); |
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, | |||
267 | void *data); | 322 | void *data); |
268 | extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, | 323 | extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, |
269 | void *data); | 324 | void *data); |
270 | struct device *iommu_device_create(struct device *parent, void *drvdata, | ||
271 | const struct attribute_group **groups, | ||
272 | const char *fmt, ...) __printf(4, 5); | ||
273 | void iommu_device_destroy(struct device *dev); | ||
274 | int iommu_device_link(struct device *dev, struct device *link); | ||
275 | void iommu_device_unlink(struct device *dev, struct device *link); | ||
276 | 325 | ||
277 | /* Window handling function prototypes */ | 326 | /* Window handling function prototypes */ |
278 | extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, | 327 | extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, | |||
352 | const struct iommu_ops *ops); | 401 | const struct iommu_ops *ops); |
353 | void iommu_fwspec_free(struct device *dev); | 402 | void iommu_fwspec_free(struct device *dev); |
354 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); | 403 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); |
355 | void iommu_register_instance(struct fwnode_handle *fwnode, | 404 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); |
356 | const struct iommu_ops *ops); | ||
357 | const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode); | ||
358 | 405 | ||
359 | #else /* CONFIG_IOMMU_API */ | 406 | #else /* CONFIG_IOMMU_API */ |
360 | 407 | ||
361 | struct iommu_ops {}; | 408 | struct iommu_ops {}; |
362 | struct iommu_group {}; | 409 | struct iommu_group {}; |
363 | struct iommu_fwspec {}; | 410 | struct iommu_fwspec {}; |
411 | struct iommu_device {}; | ||
364 | 412 | ||
365 | static inline bool iommu_present(struct bus_type *bus) | 413 | static inline bool iommu_present(struct bus_type *bus) |
366 | { | 414 | { |
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, | |||
443 | { | 491 | { |
444 | } | 492 | } |
445 | 493 | ||
446 | static inline void iommu_get_dm_regions(struct device *dev, | 494 | static inline void iommu_get_resv_regions(struct device *dev, |
447 | struct list_head *list) | 495 | struct list_head *list) |
448 | { | 496 | { |
449 | } | 497 | } |
450 | 498 | ||
451 | static inline void iommu_put_dm_regions(struct device *dev, | 499 | static inline void iommu_put_resv_regions(struct device *dev, |
452 | struct list_head *list) | 500 | struct list_head *list) |
453 | { | 501 | { |
454 | } | 502 | } |
455 | 503 | ||
504 | static inline int iommu_get_group_resv_regions(struct iommu_group *group, | ||
505 | struct list_head *head) | ||
506 | { | ||
507 | return -ENODEV; | ||
508 | } | ||
509 | |||
456 | static inline int iommu_request_dm_for_dev(struct device *dev) | 510 | static inline int iommu_request_dm_for_dev(struct device *dev) |
457 | { | 511 | { |
458 | return -ENODEV; | 512 | return -ENODEV; |
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain, | |||
546 | return -EINVAL; | 600 | return -EINVAL; |
547 | } | 601 | } |
548 | 602 | ||
549 | static inline struct device *iommu_device_create(struct device *parent, | 603 | static inline int iommu_device_register(struct iommu_device *iommu) |
550 | void *drvdata, | 604 | { |
551 | const struct attribute_group **groups, | 605 | return -ENODEV; |
552 | const char *fmt, ...) | 606 | } |
607 | |||
608 | static inline void iommu_device_set_ops(struct iommu_device *iommu, | ||
609 | const struct iommu_ops *ops) | ||
610 | { | ||
611 | } | ||
612 | |||
613 | static inline void iommu_device_set_fwnode(struct iommu_device *iommu, | ||
614 | struct fwnode_handle *fwnode) | ||
615 | { | ||
616 | } | ||
617 | |||
618 | static inline void iommu_device_unregister(struct iommu_device *iommu) | ||
553 | { | 619 | { |
554 | return ERR_PTR(-ENODEV); | ||
555 | } | 620 | } |
556 | 621 | ||
557 | static inline void iommu_device_destroy(struct device *dev) | 622 | static inline int iommu_device_sysfs_add(struct iommu_device *iommu, |
623 | struct device *parent, | ||
624 | const struct attribute_group **groups, | ||
625 | const char *fmt, ...) | ||
626 | { | ||
627 | return -ENODEV; | ||
628 | } | ||
629 | |||
630 | static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) | ||
558 | { | 631 | { |
559 | } | 632 | } |
560 | 633 | ||
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, | |||
584 | return -ENODEV; | 657 | return -ENODEV; |
585 | } | 658 | } |
586 | 659 | ||
587 | static inline void iommu_register_instance(struct fwnode_handle *fwnode, | ||
588 | const struct iommu_ops *ops) | ||
589 | { | ||
590 | } | ||
591 | |||
592 | static inline | 660 | static inline |
593 | const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) | 661 | const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) |
594 | { | 662 | { |
595 | return NULL; | 663 | return NULL; |
596 | } | 664 | } |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index ffb84604c1de..188eced6813e 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -183,6 +183,12 @@ enum { | |||
183 | /* Irq domain is an IPI domain with single virq */ | 183 | /* Irq domain is an IPI domain with single virq */ |
184 | IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), | 184 | IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), |
185 | 185 | ||
186 | /* Irq domain implements MSIs */ | ||
187 | IRQ_DOMAIN_FLAG_MSI = (1 << 4), | ||
188 | |||
189 | /* Irq domain implements MSI remapping */ | ||
190 | IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), | ||
191 | |||
186 | /* | 192 | /* |
187 | * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved | 193 | * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved |
188 | * for implementation specific purposes and ignored by the | 194 | * for implementation specific purposes and ignored by the |
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
216 | void *host_data); | 222 | void *host_data); |
217 | extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, | 223 | extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, |
218 | enum irq_domain_bus_token bus_token); | 224 | enum irq_domain_bus_token bus_token); |
225 | extern bool irq_domain_check_msi_remap(void); | ||
219 | extern void irq_set_default_host(struct irq_domain *host); | 226 | extern void irq_set_default_host(struct irq_domain *host); |
220 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, | 227 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, |
221 | irq_hw_number_t hwirq, int node, | 228 | irq_hw_number_t hwirq, int node, |
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) | |||
446 | { | 453 | { |
447 | return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; | 454 | return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; |
448 | } | 455 | } |
456 | |||
457 | static inline bool irq_domain_is_msi(struct irq_domain *domain) | ||
458 | { | ||
459 | return domain->flags & IRQ_DOMAIN_FLAG_MSI; | ||
460 | } | ||
461 | |||
462 | static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) | ||
463 | { | ||
464 | return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP; | ||
465 | } | ||
466 | |||
467 | extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); | ||
468 | |||
449 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | 469 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ |
450 | static inline void irq_domain_activate_irq(struct irq_data *data) { } | 470 | static inline void irq_domain_activate_irq(struct irq_data *data) { } |
451 | static inline void irq_domain_deactivate_irq(struct irq_data *data) { } | 471 | static inline void irq_domain_deactivate_irq(struct irq_data *data) { } |
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) | |||
477 | { | 497 | { |
478 | return false; | 498 | return false; |
479 | } | 499 | } |
500 | |||
501 | static inline bool irq_domain_is_msi(struct irq_domain *domain) | ||
502 | { | ||
503 | return false; | ||
504 | } | ||
505 | |||
506 | static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) | ||
507 | { | ||
508 | return false; | ||
509 | } | ||
510 | |||
511 | static inline bool | ||
512 | irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) | ||
513 | { | ||
514 | return false; | ||
515 | } | ||
480 | #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | 516 | #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ |
481 | 517 | ||
482 | #else /* CONFIG_IRQ_DOMAIN */ | 518 | #else /* CONFIG_IRQ_DOMAIN */ |
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 6a7fc5051099..13394ac83c66 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h | |||
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
31 | 31 | ||
32 | #endif /* CONFIG_OF_IOMMU */ | 32 | #endif /* CONFIG_OF_IOMMU */ |
33 | 33 | ||
34 | static inline void of_iommu_set_ops(struct device_node *np, | ||
35 | const struct iommu_ops *ops) | ||
36 | { | ||
37 | iommu_register_instance(&np->fwnode, ops); | ||
38 | } | ||
39 | |||
40 | static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np) | ||
41 | { | ||
42 | return iommu_get_instance(&np->fwnode); | ||
43 | } | ||
44 | |||
45 | extern struct of_device_id __iommu_of_table; | 34 | extern struct of_device_id __iommu_of_table; |
46 | 35 | ||
47 | typedef int (*of_iommu_init_fn)(struct device_node *); | 36 | typedef int (*of_iommu_init_fn)(struct device_node *); |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index b59e6768c5e9..31805f237396 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -278,6 +278,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, | |||
278 | EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); | 278 | EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); |
279 | 279 | ||
280 | /** | 280 | /** |
281 | * irq_domain_check_msi_remap - Check whether all MSI irq domains implement | ||
282 | * IRQ remapping | ||
283 | * | ||
284 | * Return: false if any MSI irq domain does not support IRQ remapping, | ||
285 | * true otherwise (including if there is no MSI irq domain) | ||
286 | */ | ||
287 | bool irq_domain_check_msi_remap(void) | ||
288 | { | ||
289 | struct irq_domain *h; | ||
290 | bool ret = true; | ||
291 | |||
292 | mutex_lock(&irq_domain_mutex); | ||
293 | list_for_each_entry(h, &irq_domain_list, link) { | ||
294 | if (irq_domain_is_msi(h) && | ||
295 | !irq_domain_hierarchical_is_msi_remap(h)) { | ||
296 | ret = false; | ||
297 | break; | ||
298 | } | ||
299 | } | ||
300 | mutex_unlock(&irq_domain_mutex); | ||
301 | return ret; | ||
302 | } | ||
303 | EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap); | ||
304 | |||
305 | /** | ||
281 | * irq_set_default_host() - Set a "default" irq domain | 306 | * irq_set_default_host() - Set a "default" irq domain |
282 | * @domain: default domain pointer | 307 | * @domain: default domain pointer |
283 | * | 308 | * |
@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain) | |||
1408 | if (domain->ops->alloc) | 1433 | if (domain->ops->alloc) |
1409 | domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; | 1434 | domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; |
1410 | } | 1435 | } |
1436 | |||
1437 | /** | ||
1438 | * irq_domain_hierarchical_is_msi_remap - Check if the domain or any | ||
1439 | * parent has MSI remapping support | ||
1440 | * @domain: domain pointer | ||
1441 | */ | ||
1442 | bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) | ||
1443 | { | ||
1444 | for (; domain; domain = domain->parent) { | ||
1445 | if (irq_domain_is_msi_remap(domain)) | ||
1446 | return true; | ||
1447 | } | ||
1448 | return false; | ||
1449 | } | ||
1411 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | 1450 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ |
1412 | /** | 1451 | /** |
1413 | * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain | 1452 | * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain |
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index ee230063f033..ddc2f5427f75 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c | |||
@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, | |||
270 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 270 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
271 | msi_domain_update_chip_ops(info); | 271 | msi_domain_update_chip_ops(info); |
272 | 272 | ||
273 | return irq_domain_create_hierarchy(parent, 0, 0, fwnode, | 273 | return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0, |
274 | &msi_domain_ops, info); | 274 | fwnode, &msi_domain_ops, info); |
275 | } | 275 | } |
276 | 276 | ||
277 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, | 277 | int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, |