Diffstat (limited to 'drivers')

 drivers/iommu/Kconfig                    |   19
 drivers/iommu/Makefile                   |    3
 drivers/iommu/amd_iommu.c                |    4
 drivers/iommu/intel-iommu.c              |    2
 drivers/iommu/iommu.c                    |  114
 drivers/iommu/msm_iommu.c                |    9
 drivers/iommu/omap-iommu-debug.c         |  418
 drivers/iommu/omap-iommu.c               | 1245
 drivers/iommu/omap-iovmm.c               |  742
 drivers/media/video/Kconfig              |    3
 drivers/media/video/omap3isp/isp.c       |   45
 drivers/media/video/omap3isp/isp.h       |    5
 drivers/media/video/omap3isp/ispccdc.c   |   25
 drivers/media/video/omap3isp/ispstat.c   |   11
 drivers/media/video/omap3isp/ispvideo.c  |    4
 15 files changed, 2589 insertions, 60 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7d7eaa15e773..5414253b185a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -112,4 +112,23 @@ config IRQ_REMAP
| 112 | To use x2apic mode in the CPUs which support x2APIC enhancements or | 112 | To use x2apic mode in the CPUs which support x2APIC enhancements or |
| 113 | to support platforms with CPUs having > 8 bit APIC ID, say Y. | 113 | to support platforms with CPUs having > 8 bit APIC ID, say Y. |
| 114 | 114 | ||
| 115 | # OMAP IOMMU support | ||
| 116 | config OMAP_IOMMU | ||
| 117 | bool "OMAP IOMMU Support" | ||
| 118 | depends on ARCH_OMAP | ||
| 119 | select IOMMU_API | ||
| 120 | |||
| 121 | config OMAP_IOVMM | ||
| 122 | tristate "OMAP IO Virtual Memory Manager Support" | ||
| 123 | depends on OMAP_IOMMU | ||
| 124 | |||
| 125 | config OMAP_IOMMU_DEBUG | ||
| 126 | tristate "Export OMAP IOMMU/IOVMM internals in DebugFS" | ||
| 127 | depends on OMAP_IOVMM && DEBUG_FS | ||
| 128 | help | ||
| 129 | Select this to see extensive information about | ||
| 130 | the internal state of OMAP IOMMU/IOVMM in debugfs. | ||
| 131 | |||
| 132 | Say N unless you know you need this. | ||
| 133 | |||
| 115 | endif # IOMMU_SUPPORT | 134 | endif # IOMMU_SUPPORT |
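For reference, a board configuration that wants the full new stack would end up with something like the following .config fragment (illustrative only; OMAP_IOMMU is bool, the other two options are tristate and could equally be built in):

    CONFIG_OMAP_IOMMU=y
    CONFIG_OMAP_IOVMM=m
    CONFIG_OMAP_IOMMU_DEBUG=m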
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 6394994a2b9d..2f4448794bc7 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
| 4 | obj-$(CONFIG_DMAR_TABLE) += dmar.o | 4 | obj-$(CONFIG_DMAR_TABLE) += dmar.o |
| 5 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o | 5 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o |
| 6 | obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o | 6 | obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o |
| 7 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o | ||
| 8 | obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o | ||
| 9 | obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o | ||
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0e4227f457af..4ee277a8521a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
| 1283 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 1283 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
| 1284 | continue; | 1284 | continue; |
| 1285 | 1285 | ||
| 1286 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); | 1286 | dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1); |
| 1287 | } | 1287 | } |
| 1288 | 1288 | ||
| 1289 | update_domain(&dma_dom->domain); | 1289 | update_domain(&dma_dom->domain); |
@@ -2495,7 +2495,7 @@ static unsigned device_dma_ops_init(void)
| 2495 | 2495 | ||
| 2496 | void __init amd_iommu_init_api(void) | 2496 | void __init amd_iommu_init_api(void) |
| 2497 | { | 2497 | { |
| 2498 | register_iommu(&amd_iommu_ops); | 2498 | bus_set_iommu(&pci_bus_type, &amd_iommu_ops); |
| 2499 | } | 2499 | } |
| 2500 | 2500 | ||
| 2501 | int __init amd_iommu_init_dma_ops(void) | 2501 | int __init amd_iommu_init_dma_ops(void) |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index be1953c239b0..bb161d2fa03c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3642,7 +3642,7 @@ int __init intel_iommu_init(void)
| 3642 | 3642 | ||
| 3643 | init_iommu_pm_ops(); | 3643 | init_iommu_pm_ops(); |
| 3644 | 3644 | ||
| 3645 | register_iommu(&intel_iommu_ops); | 3645 | bus_set_iommu(&pci_bus_type, &intel_iommu_ops); |
| 3646 | 3646 | ||
| 3647 | bus_register_notifier(&pci_bus_type, &device_nb); | 3647 | bus_register_notifier(&pci_bus_type, &device_nb); |
| 3648 | 3648 | ||
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3ce..2fb2963df553 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
| 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/device.h> | ||
| 20 | #include <linux/kernel.h> | ||
| 19 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
| 20 | #include <linux/types.h> | 22 | #include <linux/types.h> |
| 21 | #include <linux/module.h> | 23 | #include <linux/module.h> |
@@ -23,32 +25,78 @@
| 23 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
| 24 | #include <linux/iommu.h> | 26 | #include <linux/iommu.h> |
| 25 | 27 | ||
| 26 | static struct iommu_ops *iommu_ops; | 28 | static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) |
| 29 | { | ||
| 30 | } | ||
| 27 | 31 | ||
| 28 | void register_iommu(struct iommu_ops *ops) | 32 | /** |
| 33 | * bus_set_iommu - set iommu-callbacks for the bus | ||
| 34 | * @bus: bus. | ||
| 35 | * @ops: the callbacks provided by the iommu-driver | ||
| 36 | * | ||
| 37 | * This function is called by an iommu driver to set the iommu methods | ||
| 38 | * used for a particular bus. Drivers for devices on that bus can use | ||
| 39 | * the iommu-api after these ops are registered. | ||
| 40 | * This special function is needed because IOMMUs are usually devices on | ||
| 41 | * the bus itself, so the iommu drivers are not initialized when the bus | ||
| 42 | * is set up. With this function the iommu-driver can set the iommu-ops | ||
| 43 | * afterwards. | ||
| 44 | */ | ||
| 45 | int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops) | ||
| 29 | { | 46 | { |
| 30 | if (iommu_ops) | 47 | if (bus->iommu_ops != NULL) |
| 31 | BUG(); | 48 | return -EBUSY; |
| 49 | |||
| 50 | bus->iommu_ops = ops; | ||
| 51 | |||
| 52 | /* Do IOMMU specific setup for this bus-type */ | ||
| 53 | iommu_bus_init(bus, ops); | ||
| 32 | 54 | ||
| 33 | iommu_ops = ops; | 55 | return 0; |
| 34 | } | 56 | } |
| 57 | EXPORT_SYMBOL_GPL(bus_set_iommu); | ||
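To make the registration contract concrete, here is a minimal driver-side sketch; the foo_* names are hypothetical callbacks defined elsewhere, not part of this patch, and the bus choice mirrors what the msm and omap drivers do below:

    static struct iommu_ops foo_iommu_ops = {
            .domain_init    = foo_domain_init,
            .domain_destroy = foo_domain_destroy,
            .attach_dev     = foo_attach_dev,
            .detach_dev     = foo_detach_dev,
            .map            = foo_map,
            .unmap          = foo_unmap,
            .iova_to_phys   = foo_iova_to_phys,
    };

    static int __init foo_iommu_init(void)
    {
            /* returns -EBUSY if another driver already claimed this bus */
            return bus_set_iommu(&platform_bus_type, &foo_iommu_ops);
    }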
| 35 | 58 | ||
| 36 | bool iommu_found(void) | 59 | bool iommu_present(struct bus_type *bus) |
| 37 | { | 60 | { |
| 38 | return iommu_ops != NULL; | 61 | return bus->iommu_ops != NULL; |
| 39 | } | 62 | } |
| 40 | EXPORT_SYMBOL_GPL(iommu_found); | 63 | EXPORT_SYMBOL_GPL(iommu_present); |
| 41 | 64 | ||
| 42 | struct iommu_domain *iommu_domain_alloc(void) | 65 | /** |
| 66 | * iommu_set_fault_handler() - set a fault handler for an iommu domain | ||
| 67 | * @domain: iommu domain | ||
| 68 | * @handler: fault handler | ||
| 69 | * | ||
| 70 | * This function should be used by IOMMU users which want to be notified | ||
| 71 | * whenever an IOMMU fault happens. | ||
| 72 | * | ||
| 73 | * The fault handler itself should return 0 on success, and an appropriate | ||
| 74 | * error code otherwise. | ||
| 75 | */ | ||
| 76 | void iommu_set_fault_handler(struct iommu_domain *domain, | ||
| 77 | iommu_fault_handler_t handler) | ||
| 78 | { | ||
| 79 | BUG_ON(!domain); | ||
| 80 | |||
| 81 | domain->handler = handler; | ||
| 82 | } | ||
| 83 | EXPORT_SYMBOL_GPL(iommu_set_fault_handler); | ||
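A minimal fault-handler sketch, assuming the iommu_fault_handler_t signature added elsewhere in this series (domain, faulting device, iova, flags); the handler name and its policy are hypothetical:

    static int foo_fault_handler(struct iommu_domain *domain,
                                 struct device *dev, unsigned long iova,
                                 int flags)
    {
            /*
             * A real handler might dynamically load a TLB/PTE entry
             * covering 'iova'. Returning non-zero tells the core the
             * fault was not handled, so the driver falls back to its
             * default reporting.
             */
            dev_err(dev, "unhandled iommu fault at 0x%lx (flags 0x%x)\n",
                    iova, flags);
            return -ENOSYS;
    }

            /* ... after allocating the domain ... */
            iommu_set_fault_handler(domain, foo_fault_handler);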
| 84 | |||
| 85 | struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) | ||
| 43 | { | 86 | { |
| 44 | struct iommu_domain *domain; | 87 | struct iommu_domain *domain; |
| 45 | int ret; | 88 | int ret; |
| 46 | 89 | ||
| 90 | if (bus == NULL || bus->iommu_ops == NULL) | ||
| 91 | return NULL; | ||
| 92 | |||
| 47 | domain = kmalloc(sizeof(*domain), GFP_KERNEL); | 93 | domain = kmalloc(sizeof(*domain), GFP_KERNEL); |
| 48 | if (!domain) | 94 | if (!domain) |
| 49 | return NULL; | 95 | return NULL; |
| 50 | 96 | ||
| 51 | ret = iommu_ops->domain_init(domain); | 97 | domain->ops = bus->iommu_ops; |
| 98 | |||
| 99 | ret = domain->ops->domain_init(domain); | ||
| 52 | if (ret) | 100 | if (ret) |
| 53 | goto out_free; | 101 | goto out_free; |
| 54 | 102 | ||
@@ -63,62 +111,78 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
| 63 | 111 | ||
| 64 | void iommu_domain_free(struct iommu_domain *domain) | 112 | void iommu_domain_free(struct iommu_domain *domain) |
| 65 | { | 113 | { |
| 66 | iommu_ops->domain_destroy(domain); | 114 | if (likely(domain->ops->domain_destroy != NULL)) |
| 115 | domain->ops->domain_destroy(domain); | ||
| 116 | |||
| 67 | kfree(domain); | 117 | kfree(domain); |
| 68 | } | 118 | } |
| 69 | EXPORT_SYMBOL_GPL(iommu_domain_free); | 119 | EXPORT_SYMBOL_GPL(iommu_domain_free); |
| 70 | 120 | ||
| 71 | int iommu_attach_device(struct iommu_domain *domain, struct device *dev) | 121 | int iommu_attach_device(struct iommu_domain *domain, struct device *dev) |
| 72 | { | 122 | { |
| 73 | return iommu_ops->attach_dev(domain, dev); | 123 | if (unlikely(domain->ops->attach_dev == NULL)) |
| 124 | return -ENODEV; | ||
| 125 | |||
| 126 | return domain->ops->attach_dev(domain, dev); | ||
| 74 | } | 127 | } |
| 75 | EXPORT_SYMBOL_GPL(iommu_attach_device); | 128 | EXPORT_SYMBOL_GPL(iommu_attach_device); |
| 76 | 129 | ||
| 77 | void iommu_detach_device(struct iommu_domain *domain, struct device *dev) | 130 | void iommu_detach_device(struct iommu_domain *domain, struct device *dev) |
| 78 | { | 131 | { |
| 79 | iommu_ops->detach_dev(domain, dev); | 132 | if (unlikely(domain->ops->detach_dev == NULL)) |
| 133 | return; | ||
| 134 | |||
| 135 | domain->ops->detach_dev(domain, dev); | ||
| 80 | } | 136 | } |
| 81 | EXPORT_SYMBOL_GPL(iommu_detach_device); | 137 | EXPORT_SYMBOL_GPL(iommu_detach_device); |
| 82 | 138 | ||
| 83 | phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 139 | phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
| 84 | unsigned long iova) | 140 | unsigned long iova) |
| 85 | { | 141 | { |
| 86 | return iommu_ops->iova_to_phys(domain, iova); | 142 | if (unlikely(domain->ops->iova_to_phys == NULL)) |
| 143 | return 0; | ||
| 144 | |||
| 145 | return domain->ops->iova_to_phys(domain, iova); | ||
| 87 | } | 146 | } |
| 88 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); | 147 | EXPORT_SYMBOL_GPL(iommu_iova_to_phys); |
| 89 | 148 | ||
| 90 | int iommu_domain_has_cap(struct iommu_domain *domain, | 149 | int iommu_domain_has_cap(struct iommu_domain *domain, |
| 91 | unsigned long cap) | 150 | unsigned long cap) |
| 92 | { | 151 | { |
| 93 | return iommu_ops->domain_has_cap(domain, cap); | 152 | if (unlikely(domain->ops->domain_has_cap == NULL)) |
| 153 | return 0; | ||
| 154 | |||
| 155 | return domain->ops->domain_has_cap(domain, cap); | ||
| 94 | } | 156 | } |
| 95 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); | 157 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); |
| 96 | 158 | ||
| 97 | int iommu_map(struct iommu_domain *domain, unsigned long iova, | 159 | int iommu_map(struct iommu_domain *domain, unsigned long iova, |
| 98 | phys_addr_t paddr, int gfp_order, int prot) | 160 | phys_addr_t paddr, int gfp_order, int prot) |
| 99 | { | 161 | { |
| 100 | unsigned long invalid_mask; | ||
| 101 | size_t size; | 162 | size_t size; |
| 102 | 163 | ||
| 103 | size = 0x1000UL << gfp_order; | 164 | if (unlikely(domain->ops->map == NULL)) |
| 104 | invalid_mask = size - 1; | 165 | return -ENODEV; |
| 105 | 166 | ||
| 106 | BUG_ON((iova | paddr) & invalid_mask); | 167 | size = PAGE_SIZE << gfp_order; |
| 107 | 168 | ||
| 108 | return iommu_ops->map(domain, iova, paddr, gfp_order, prot); | 169 | BUG_ON(!IS_ALIGNED(iova | paddr, size)); |
| 170 | |||
| 171 | return domain->ops->map(domain, iova, paddr, gfp_order, prot); | ||
| 109 | } | 172 | } |
| 110 | EXPORT_SYMBOL_GPL(iommu_map); | 173 | EXPORT_SYMBOL_GPL(iommu_map); |
| 111 | 174 | ||
| 112 | int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) | 175 | int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) |
| 113 | { | 176 | { |
| 114 | unsigned long invalid_mask; | ||
| 115 | size_t size; | 177 | size_t size; |
| 116 | 178 | ||
| 117 | size = 0x1000UL << gfp_order; | 179 | if (unlikely(domain->ops->unmap == NULL)) |
| 118 | invalid_mask = size - 1; | 180 | return -ENODEV; |
| 181 | |||
| 182 | size = PAGE_SIZE << gfp_order; | ||
| 119 | 183 | ||
| 120 | BUG_ON(iova & invalid_mask); | 184 | BUG_ON(!IS_ALIGNED(iova, size)); |
| 121 | 185 | ||
| 122 | return iommu_ops->unmap(domain, iova, gfp_order); | 186 | return domain->ops->unmap(domain, iova, gfp_order); |
| 123 | } | 187 | } |
| 124 | EXPORT_SYMBOL_GPL(iommu_unmap); | 188 | EXPORT_SYMBOL_GPL(iommu_unmap); |
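Taken together, the consumer-side flow changes only by the new bus argument. A hedged usage sketch (the function and its parameters are placeholders, not part of this patch):

    static int foo_client_setup(struct device *dev, unsigned long iova,
                                phys_addr_t paddr)
    {
            struct iommu_domain *domain;
            int ret;

            if (!iommu_present(&platform_bus_type))
                    return -ENODEV;

            domain = iommu_domain_alloc(&platform_bus_type);
            if (!domain)
                    return -ENOMEM;

            ret = iommu_attach_device(domain, dev);
            if (ret)
                    goto err_free;

            /* order 0 == one PAGE_SIZE page; iova/paddr must be size-aligned */
            ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
            if (ret)
                    goto err_detach;

            return 0;

    err_detach:
            iommu_detach_device(domain, dev);
    err_free:
            iommu_domain_free(domain);
            return ret;
    }

Tear-down would call iommu_unmap(domain, iova, 0), which on success returns the order actually unmapped; the msm change below exists precisely to honor that convention.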
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1a584e077c61..5865dd2e28f9 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -543,6 +543,13 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
| 543 | } | 543 | } |
| 544 | 544 | ||
| 545 | ret = __flush_iotlb(domain); | 545 | ret = __flush_iotlb(domain); |
| 546 | |||
| 547 | /* | ||
| 548 | * the IOMMU API requires us to return the order of the unmapped | ||
| 549 | * page (on success). | ||
| 550 | */ | ||
| 551 | if (!ret) | ||
| 552 | ret = order; | ||
| 546 | fail: | 553 | fail: |
| 547 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | 554 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
| 548 | return ret; | 555 | return ret; |
@@ -721,7 +728,7 @@ static void __init setup_iommu_tex_classes(void)
| 721 | static int __init msm_iommu_init(void) | 728 | static int __init msm_iommu_init(void) |
| 722 | { | 729 | { |
| 723 | setup_iommu_tex_classes(); | 730 | setup_iommu_tex_classes(); |
| 724 | register_iommu(&msm_iommu_ops); | 731 | bus_set_iommu(&platform_bus_type, &msm_iommu_ops); |
| 725 | return 0; | 732 | return 0; |
| 726 | } | 733 | } |
| 727 | 734 | ||
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
new file mode 100644
index 000000000000..9c192e79f806
--- /dev/null
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -0,0 +1,418 @@
| 1 | /* | ||
| 2 | * omap iommu: debugfs interface | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
| 5 | * | ||
| 6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/uaccess.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/debugfs.h> | ||
| 20 | |||
| 21 | #include <plat/iommu.h> | ||
| 22 | #include <plat/iovmm.h> | ||
| 23 | |||
| 24 | #include <plat/iopgtable.h> | ||
| 25 | |||
| 26 | #define MAXCOLUMN 100 /* for short messages */ | ||
| 27 | |||
| 28 | static DEFINE_MUTEX(iommu_debug_lock); | ||
| 29 | |||
| 30 | static struct dentry *iommu_debug_root; | ||
| 31 | |||
| 32 | static ssize_t debug_read_ver(struct file *file, char __user *userbuf, | ||
| 33 | size_t count, loff_t *ppos) | ||
| 34 | { | ||
| 35 | u32 ver = omap_iommu_arch_version(); | ||
| 36 | char buf[MAXCOLUMN], *p = buf; | ||
| 37 | |||
| 38 | p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); | ||
| 39 | |||
| 40 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
| 41 | } | ||
| 42 | |||
| 43 | static ssize_t debug_read_regs(struct file *file, char __user *userbuf, | ||
| 44 | size_t count, loff_t *ppos) | ||
| 45 | { | ||
| 46 | struct omap_iommu *obj = file->private_data; | ||
| 47 | char *p, *buf; | ||
| 48 | ssize_t bytes; | ||
| 49 | |||
| 50 | buf = kmalloc(count, GFP_KERNEL); | ||
| 51 | if (!buf) | ||
| 52 | return -ENOMEM; | ||
| 53 | p = buf; | ||
| 54 | |||
| 55 | mutex_lock(&iommu_debug_lock); | ||
| 56 | |||
| 57 | bytes = omap_iommu_dump_ctx(obj, p, count); | ||
| 58 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); | ||
| 59 | |||
| 60 | mutex_unlock(&iommu_debug_lock); | ||
| 61 | kfree(buf); | ||
| 62 | |||
| 63 | return bytes; | ||
| 64 | } | ||
| 65 | |||
| 66 | static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, | ||
| 67 | size_t count, loff_t *ppos) | ||
| 68 | { | ||
| 69 | struct omap_iommu *obj = file->private_data; | ||
| 70 | char *p, *buf; | ||
| 71 | ssize_t bytes, rest; | ||
| 72 | |||
| 73 | buf = kmalloc(count, GFP_KERNEL); | ||
| 74 | if (!buf) | ||
| 75 | return -ENOMEM; | ||
| 76 | p = buf; | ||
| 77 | |||
| 78 | mutex_lock(&iommu_debug_lock); | ||
| 79 | |||
| 80 | p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); | ||
| 81 | p += sprintf(p, "-----------------------------------------\n"); | ||
| 82 | rest = count - (p - buf); | ||
| 83 | p += omap_dump_tlb_entries(obj, p, rest); | ||
| 84 | |||
| 85 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
| 86 | |||
| 87 | mutex_unlock(&iommu_debug_lock); | ||
| 88 | kfree(buf); | ||
| 89 | |||
| 90 | return bytes; | ||
| 91 | } | ||
| 92 | |||
| 93 | static ssize_t debug_write_pagetable(struct file *file, | ||
| 94 | const char __user *userbuf, size_t count, loff_t *ppos) | ||
| 95 | { | ||
| 96 | struct iotlb_entry e; | ||
| 97 | struct cr_regs cr; | ||
| 98 | int err; | ||
| 99 | struct omap_iommu *obj = file->private_data; | ||
| 100 | char buf[MAXCOLUMN], *p = buf; | ||
| 101 | |||
| 102 | count = min(count, sizeof(buf)); | ||
| 103 | |||
| 104 | mutex_lock(&iommu_debug_lock); | ||
| 105 | if (copy_from_user(p, userbuf, count)) { | ||
| 106 | mutex_unlock(&iommu_debug_lock); | ||
| 107 | return -EFAULT; | ||
| 108 | } | ||
| 109 | |||
| 110 | sscanf(p, "%x %x", &cr.cam, &cr.ram); | ||
| 111 | if (!cr.cam || !cr.ram) { | ||
| 112 | mutex_unlock(&iommu_debug_lock); | ||
| 113 | return -EINVAL; | ||
| 114 | } | ||
| 115 | |||
| 116 | omap_iotlb_cr_to_e(&cr, &e); | ||
| 117 | err = omap_iopgtable_store_entry(obj, &e); | ||
| 118 | if (err) | ||
| 119 | dev_err(obj->dev, "%s: fail to store cr\n", __func__); | ||
| 120 | |||
| 121 | mutex_unlock(&iommu_debug_lock); | ||
| 122 | return count; | ||
| 123 | } | ||
| 124 | |||
| 125 | #define dump_ioptable_entry_one(lv, da, val) \ | ||
| 126 | ({ \ | ||
| 127 | int __err = 0; \ | ||
| 128 | ssize_t bytes; \ | ||
| 129 | const int maxcol = 22; \ | ||
| 130 | const char *str = "%d: %08x %08x\n"; \ | ||
| 131 | bytes = snprintf(p, maxcol, str, lv, da, val); \ | ||
| 132 | p += bytes; \ | ||
| 133 | len -= bytes; \ | ||
| 134 | if (len < maxcol) \ | ||
| 135 | __err = -ENOMEM; \ | ||
| 136 | __err; \ | ||
| 137 | }) | ||
| 138 | |||
| 139 | static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) | ||
| 140 | { | ||
| 141 | int i; | ||
| 142 | u32 *iopgd; | ||
| 143 | char *p = buf; | ||
| 144 | |||
| 145 | spin_lock(&obj->page_table_lock); | ||
| 146 | |||
| 147 | iopgd = iopgd_offset(obj, 0); | ||
| 148 | for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { | ||
| 149 | int j, err; | ||
| 150 | u32 *iopte; | ||
| 151 | u32 da; | ||
| 152 | |||
| 153 | if (!*iopgd) | ||
| 154 | continue; | ||
| 155 | |||
| 156 | if (!(*iopgd & IOPGD_TABLE)) { | ||
| 157 | da = i << IOPGD_SHIFT; | ||
| 158 | |||
| 159 | err = dump_ioptable_entry_one(1, da, *iopgd); | ||
| 160 | if (err) | ||
| 161 | goto out; | ||
| 162 | continue; | ||
| 163 | } | ||
| 164 | |||
| 165 | iopte = iopte_offset(iopgd, 0); | ||
| 166 | |||
| 167 | for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { | ||
| 168 | if (!*iopte) | ||
| 169 | continue; | ||
| 170 | |||
| 171 | da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); | ||
| 172 | err = dump_ioptable_entry_one(2, da, *iopte); | ||
| 173 | if (err) | ||
| 174 | goto out; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | out: | ||
| 178 | spin_unlock(&obj->page_table_lock); | ||
| 179 | |||
| 180 | return p - buf; | ||
| 181 | } | ||
| 182 | |||
| 183 | static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, | ||
| 184 | size_t count, loff_t *ppos) | ||
| 185 | { | ||
| 186 | struct omap_iommu *obj = file->private_data; | ||
| 187 | char *p, *buf; | ||
| 188 | size_t bytes; | ||
| 189 | |||
| 190 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
| 191 | if (!buf) | ||
| 192 | return -ENOMEM; | ||
| 193 | p = buf; | ||
| 194 | |||
| 195 | p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); | ||
| 196 | p += sprintf(p, "-----------------------------------------\n"); | ||
| 197 | |||
| 198 | mutex_lock(&iommu_debug_lock); | ||
| 199 | |||
| 200 | bytes = PAGE_SIZE - (p - buf); | ||
| 201 | p += dump_ioptable(obj, p, bytes); | ||
| 202 | |||
| 203 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
| 204 | |||
| 205 | mutex_unlock(&iommu_debug_lock); | ||
| 206 | free_page((unsigned long)buf); | ||
| 207 | |||
| 208 | return bytes; | ||
| 209 | } | ||
| 210 | |||
| 211 | static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, | ||
| 212 | size_t count, loff_t *ppos) | ||
| 213 | { | ||
| 214 | struct omap_iommu *obj = file->private_data; | ||
| 215 | char *p, *buf; | ||
| 216 | struct iovm_struct *tmp; | ||
| 217 | int i = 0; | ||
| 218 | ssize_t bytes; | ||
| 219 | |||
| 220 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
| 221 | if (!buf) | ||
| 222 | return -ENOMEM; | ||
| 223 | p = buf; | ||
| 224 | |||
| 225 | p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n", | ||
| 226 | "No", "start", "end", "size", "flags"); | ||
| 227 | p += sprintf(p, "-------------------------------------------------\n"); | ||
| 228 | |||
| 229 | mutex_lock(&iommu_debug_lock); | ||
| 230 | |||
| 231 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
| 232 | size_t len; | ||
| 233 | const char *str = "%3d %08x-%08x %6x %8x\n"; | ||
| 234 | const int maxcol = 39; | ||
| 235 | |||
| 236 | len = tmp->da_end - tmp->da_start; | ||
| 237 | p += snprintf(p, maxcol, str, | ||
| 238 | i, tmp->da_start, tmp->da_end, len, tmp->flags); | ||
| 239 | |||
| 240 | if (PAGE_SIZE - (p - buf) < maxcol) | ||
| 241 | break; | ||
| 242 | i++; | ||
| 243 | } | ||
| 244 | |||
| 245 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
| 246 | |||
| 247 | mutex_unlock(&iommu_debug_lock); | ||
| 248 | free_page((unsigned long)buf); | ||
| 249 | |||
| 250 | return bytes; | ||
| 251 | } | ||
| 252 | |||
| 253 | static ssize_t debug_read_mem(struct file *file, char __user *userbuf, | ||
| 254 | size_t count, loff_t *ppos) | ||
| 255 | { | ||
| 256 | struct omap_iommu *obj = file->private_data; | ||
| 257 | char *p, *buf; | ||
| 258 | struct iovm_struct *area; | ||
| 259 | ssize_t bytes; | ||
| 260 | |||
| 261 | count = min_t(ssize_t, count, PAGE_SIZE); | ||
| 262 | |||
| 263 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
| 264 | if (!buf) | ||
| 265 | return -ENOMEM; | ||
| 266 | p = buf; | ||
| 267 | |||
| 268 | mutex_lock(&iommu_debug_lock); | ||
| 269 | |||
| 270 | area = omap_find_iovm_area(obj, (u32)ppos); | ||
| 271 | if (IS_ERR(area)) { | ||
| 272 | bytes = -EINVAL; | ||
| 273 | goto err_out; | ||
| 274 | } | ||
| 275 | memcpy(p, area->va, count); | ||
| 276 | p += count; | ||
| 277 | |||
| 278 | bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
| 279 | err_out: | ||
| 280 | mutex_unlock(&iommu_debug_lock); | ||
| 281 | free_page((unsigned long)buf); | ||
| 282 | |||
| 283 | return bytes; | ||
| 284 | } | ||
| 285 | |||
| 286 | static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, | ||
| 287 | size_t count, loff_t *ppos) | ||
| 288 | { | ||
| 289 | struct omap_iommu *obj = file->private_data; | ||
| 290 | struct iovm_struct *area; | ||
| 291 | char *p, *buf; | ||
| 292 | |||
| 293 | count = min_t(size_t, count, PAGE_SIZE); | ||
| 294 | |||
| 295 | buf = (char *)__get_free_page(GFP_KERNEL); | ||
| 296 | if (!buf) | ||
| 297 | return -ENOMEM; | ||
| 298 | p = buf; | ||
| 299 | |||
| 300 | mutex_lock(&iommu_debug_lock); | ||
| 301 | |||
| 302 | if (copy_from_user(p, userbuf, count)) { | ||
| 303 | count = -EFAULT; | ||
| 304 | goto err_out; | ||
| 305 | } | ||
| 306 | |||
| 307 | area = omap_find_iovm_area(obj, (u32)ppos); | ||
| 308 | if (IS_ERR(area)) { | ||
| 309 | count = -EINVAL; | ||
| 310 | goto err_out; | ||
| 311 | } | ||
| 312 | memcpy(area->va, p, count); | ||
| 313 | err_out: | ||
| 314 | mutex_unlock(&iommu_debug_lock); | ||
| 315 | free_page((unsigned long)buf); | ||
| 316 | |||
| 317 | return count; | ||
| 318 | } | ||
| 319 | |||
| 320 | static int debug_open_generic(struct inode *inode, struct file *file) | ||
| 321 | { | ||
| 322 | file->private_data = inode->i_private; | ||
| 323 | return 0; | ||
| 324 | } | ||
| 325 | |||
| 326 | #define DEBUG_FOPS(name) \ | ||
| 327 | static const struct file_operations debug_##name##_fops = { \ | ||
| 328 | .open = debug_open_generic, \ | ||
| 329 | .read = debug_read_##name, \ | ||
| 330 | .write = debug_write_##name, \ | ||
| 331 | .llseek = generic_file_llseek, \ | ||
| 332 | }; | ||
| 333 | |||
| 334 | #define DEBUG_FOPS_RO(name) \ | ||
| 335 | static const struct file_operations debug_##name##_fops = { \ | ||
| 336 | .open = debug_open_generic, \ | ||
| 337 | .read = debug_read_##name, \ | ||
| 338 | .llseek = generic_file_llseek, \ | ||
| 339 | }; | ||
| 340 | |||
| 341 | DEBUG_FOPS_RO(ver); | ||
| 342 | DEBUG_FOPS_RO(regs); | ||
| 343 | DEBUG_FOPS_RO(tlb); | ||
| 344 | DEBUG_FOPS(pagetable); | ||
| 345 | DEBUG_FOPS_RO(mmap); | ||
| 346 | DEBUG_FOPS(mem); | ||
| 347 | |||
| 348 | #define __DEBUG_ADD_FILE(attr, mode) \ | ||
| 349 | { \ | ||
| 350 | struct dentry *dent; \ | ||
| 351 | dent = debugfs_create_file(#attr, mode, parent, \ | ||
| 352 | obj, &debug_##attr##_fops); \ | ||
| 353 | if (!dent) \ | ||
| 354 | return -ENOMEM; \ | ||
| 355 | } | ||
| 356 | |||
| 357 | #define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600) | ||
| 358 | #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) | ||
| 359 | |||
| 360 | static int iommu_debug_register(struct device *dev, void *data) | ||
| 361 | { | ||
| 362 | struct platform_device *pdev = to_platform_device(dev); | ||
| 363 | struct omap_iommu *obj = platform_get_drvdata(pdev); | ||
| 364 | struct dentry *d, *parent; | ||
| 365 | |||
| 366 | if (!obj || !obj->dev) | ||
| 367 | return -EINVAL; | ||
| 368 | |||
| 369 | d = debugfs_create_dir(obj->name, iommu_debug_root); | ||
| 370 | if (!d) | ||
| 371 | return -ENOMEM; | ||
| 372 | parent = d; | ||
| 373 | |||
| 374 | d = debugfs_create_u8("nr_tlb_entries", 0400, parent, | ||
| 375 | (u8 *)&obj->nr_tlb_entries); | ||
| 376 | if (!d) | ||
| 377 | return -ENOMEM; | ||
| 378 | |||
| 379 | DEBUG_ADD_FILE_RO(ver); | ||
| 380 | DEBUG_ADD_FILE_RO(regs); | ||
| 381 | DEBUG_ADD_FILE_RO(tlb); | ||
| 382 | DEBUG_ADD_FILE(pagetable); | ||
| 383 | DEBUG_ADD_FILE_RO(mmap); | ||
| 384 | DEBUG_ADD_FILE(mem); | ||
| 385 | |||
| 386 | return 0; | ||
| 387 | } | ||
| 388 | |||
| 389 | static int __init iommu_debug_init(void) | ||
| 390 | { | ||
| 391 | struct dentry *d; | ||
| 392 | int err; | ||
| 393 | |||
| 394 | d = debugfs_create_dir("iommu", NULL); | ||
| 395 | if (!d) | ||
| 396 | return -ENOMEM; | ||
| 397 | iommu_debug_root = d; | ||
| 398 | |||
| 399 | err = omap_foreach_iommu_device(d, iommu_debug_register); | ||
| 400 | if (err) | ||
| 401 | goto err_out; | ||
| 402 | return 0; | ||
| 403 | |||
| 404 | err_out: | ||
| 405 | debugfs_remove_recursive(iommu_debug_root); | ||
| 406 | return err; | ||
| 407 | } | ||
| 408 | module_init(iommu_debug_init) | ||
| 409 | |||
| 410 | static void __exit iommu_debugfs_exit(void) | ||
| 411 | { | ||
| 412 | debugfs_remove_recursive(iommu_debug_root); | ||
| 413 | } | ||
| 414 | module_exit(iommu_debugfs_exit) | ||
| 415 | |||
| 416 | MODULE_DESCRIPTION("omap iommu: debugfs interface"); | ||
| 417 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
| 418 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
new file mode 100644
index 000000000000..8f32b2bf7587
--- /dev/null
+++ b/drivers/iommu/omap-iommu.c
@@ -0,0 +1,1245 @@
| 1 | /* | ||
| 2 | * omap iommu: tlb and pagetable primitives | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2010 Nokia Corporation | ||
| 5 | * | ||
| 6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, | ||
| 7 | * Paul Mundt and Toshihiro Kobayashi | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/err.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/ioport.h> | ||
| 19 | #include <linux/clk.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/iommu.h> | ||
| 22 | #include <linux/mutex.h> | ||
| 23 | #include <linux/spinlock.h> | ||
| 24 | |||
| 25 | #include <asm/cacheflush.h> | ||
| 26 | |||
| 27 | #include <plat/iommu.h> | ||
| 28 | |||
| 29 | #include <plat/iopgtable.h> | ||
| 30 | |||
| 31 | #define for_each_iotlb_cr(obj, n, __i, cr) \ | ||
| 32 | for (__i = 0; \ | ||
| 33 | (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ | ||
| 34 | __i++) | ||
| 35 | |||
| 36 | /** | ||
| 37 | * struct omap_iommu_domain - omap iommu domain | ||
| 38 | * @pgtable: the page table | ||
| 39 | * @iommu_dev: an omap iommu device attached to this domain. only a single | ||
| 40 | * iommu device can be attached for now. | ||
| 41 | * @lock: domain lock, should be taken when attaching/detaching | ||
| 42 | */ | ||
| 43 | struct omap_iommu_domain { | ||
| 44 | u32 *pgtable; | ||
| 45 | struct omap_iommu *iommu_dev; | ||
| 46 | spinlock_t lock; | ||
| 47 | }; | ||
| 48 | |||
| 49 | /* accommodate the difference between omap1 and omap2/3 */ | ||
| 50 | static const struct iommu_functions *arch_iommu; | ||
| 51 | |||
| 52 | static struct platform_driver omap_iommu_driver; | ||
| 53 | static struct kmem_cache *iopte_cachep; | ||
| 54 | |||
| 55 | /** | ||
| 56 | * omap_install_iommu_arch - Install architecture-specific iommu functions | ||
| 57 | * @ops: a pointer to architecture specific iommu functions | ||
| 58 | * | ||
| 59 | * There are several kinds of iommu algorithms (tlb, pagetable) among the | ||
| 60 | * omap series. This interface installs such an iommu algorithm. | ||
| 61 | **/ | ||
| 62 | int omap_install_iommu_arch(const struct iommu_functions *ops) | ||
| 63 | { | ||
| 64 | if (arch_iommu) | ||
| 65 | return -EBUSY; | ||
| 66 | |||
| 67 | arch_iommu = ops; | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | EXPORT_SYMBOL_GPL(omap_install_iommu_arch); | ||
| 71 | |||
| 72 | /** | ||
| 73 | * omap_uninstall_iommu_arch - Uninstall architecture-specific iommu functions | ||
| 74 | * @ops: a pointer to architecture specific iommu functions | ||
| 75 | * | ||
| 76 | * This interface uninstalls the iommu algorithm installed previously. | ||
| 77 | **/ | ||
| 78 | void omap_uninstall_iommu_arch(const struct iommu_functions *ops) | ||
| 79 | { | ||
| 80 | if (arch_iommu != ops) | ||
| 81 | pr_err("%s: not your arch\n", __func__); | ||
| 82 | |||
| 83 | arch_iommu = NULL; | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); | ||
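To make the install hook concrete: an OMAP2/3-specific module (outside this hunk) fills a struct iommu_functions and installs it once at init. A partial, hypothetical sketch; the omap2_* callbacks and the version encoding are assumptions:

    static const struct iommu_functions omap2_iommu_ops = {
            .version        = 0x20, /* printed as "2.0" by the debugfs code */
            .enable         = omap2_iommu_enable,
            .disable        = omap2_iommu_disable,
            .fault_isr      = omap2_iommu_fault_isr,
            /* remaining TLB/cr and context callbacks omitted for brevity */
    };

    static int __init omap2_iommu_init(void)
    {
            return omap_install_iommu_arch(&omap2_iommu_ops);
    }

    static void __exit omap2_iommu_exit(void)
    {
            omap_uninstall_iommu_arch(&omap2_iommu_ops);
    }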
| 86 | |||
| 87 | /** | ||
| 88 | * omap_iommu_save_ctx - Save registers for pm off-mode support | ||
| 89 | * @obj: target iommu | ||
| 90 | **/ | ||
| 91 | void omap_iommu_save_ctx(struct omap_iommu *obj) | ||
| 92 | { | ||
| 93 | arch_iommu->save_ctx(obj); | ||
| 94 | } | ||
| 95 | EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); | ||
| 96 | |||
| 97 | /** | ||
| 98 | * omap_iommu_restore_ctx - Restore registers for pm off-mode support | ||
| 99 | * @obj: target iommu | ||
| 100 | **/ | ||
| 101 | void omap_iommu_restore_ctx(struct omap_iommu *obj) | ||
| 102 | { | ||
| 103 | arch_iommu->restore_ctx(obj); | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); | ||
| 106 | |||
| 107 | /** | ||
| 108 | * omap_iommu_arch_version - Return running iommu arch version | ||
| 109 | **/ | ||
| 110 | u32 omap_iommu_arch_version(void) | ||
| 111 | { | ||
| 112 | return arch_iommu->version; | ||
| 113 | } | ||
| 114 | EXPORT_SYMBOL_GPL(omap_iommu_arch_version); | ||
| 115 | |||
| 116 | static int iommu_enable(struct omap_iommu *obj) | ||
| 117 | { | ||
| 118 | int err; | ||
| 119 | |||
| 120 | if (!obj) | ||
| 121 | return -EINVAL; | ||
| 122 | |||
| 123 | if (!arch_iommu) | ||
| 124 | return -ENODEV; | ||
| 125 | |||
| 126 | clk_enable(obj->clk); | ||
| 127 | |||
| 128 | err = arch_iommu->enable(obj); | ||
| 129 | |||
| 130 | clk_disable(obj->clk); | ||
| 131 | return err; | ||
| 132 | } | ||
| 133 | |||
| 134 | static void iommu_disable(struct omap_iommu *obj) | ||
| 135 | { | ||
| 136 | if (!obj) | ||
| 137 | return; | ||
| 138 | |||
| 139 | clk_enable(obj->clk); | ||
| 140 | |||
| 141 | arch_iommu->disable(obj); | ||
| 142 | |||
| 143 | clk_disable(obj->clk); | ||
| 144 | } | ||
| 145 | |||
| 146 | /* | ||
| 147 | * TLB operations | ||
| 148 | */ | ||
| 149 | void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) | ||
| 150 | { | ||
| 151 | BUG_ON(!cr || !e); | ||
| 152 | |||
| 153 | arch_iommu->cr_to_e(cr, e); | ||
| 154 | } | ||
| 155 | EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); | ||
| 156 | |||
| 157 | static inline int iotlb_cr_valid(struct cr_regs *cr) | ||
| 158 | { | ||
| 159 | if (!cr) | ||
| 160 | return -EINVAL; | ||
| 161 | |||
| 162 | return arch_iommu->cr_valid(cr); | ||
| 163 | } | ||
| 164 | |||
| 165 | static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, | ||
| 166 | struct iotlb_entry *e) | ||
| 167 | { | ||
| 168 | if (!e) | ||
| 169 | return NULL; | ||
| 170 | |||
| 171 | return arch_iommu->alloc_cr(obj, e); | ||
| 172 | } | ||
| 173 | |||
| 174 | static u32 iotlb_cr_to_virt(struct cr_regs *cr) | ||
| 175 | { | ||
| 176 | return arch_iommu->cr_to_virt(cr); | ||
| 177 | } | ||
| 178 | |||
| 179 | static u32 get_iopte_attr(struct iotlb_entry *e) | ||
| 180 | { | ||
| 181 | return arch_iommu->get_pte_attr(e); | ||
| 182 | } | ||
| 183 | |||
| 184 | static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) | ||
| 185 | { | ||
| 186 | return arch_iommu->fault_isr(obj, da); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) | ||
| 190 | { | ||
| 191 | u32 val; | ||
| 192 | |||
| 193 | val = iommu_read_reg(obj, MMU_LOCK); | ||
| 194 | |||
| 195 | l->base = MMU_LOCK_BASE(val); | ||
| 196 | l->vict = MMU_LOCK_VICT(val); | ||
| 197 | |||
| 198 | } | ||
| 199 | |||
| 200 | static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) | ||
| 201 | { | ||
| 202 | u32 val; | ||
| 203 | |||
| 204 | val = (l->base << MMU_LOCK_BASE_SHIFT); | ||
| 205 | val |= (l->vict << MMU_LOCK_VICT_SHIFT); | ||
| 206 | |||
| 207 | iommu_write_reg(obj, val, MMU_LOCK); | ||
| 208 | } | ||
| 209 | |||
| 210 | static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) | ||
| 211 | { | ||
| 212 | arch_iommu->tlb_read_cr(obj, cr); | ||
| 213 | } | ||
| 214 | |||
| 215 | static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) | ||
| 216 | { | ||
| 217 | arch_iommu->tlb_load_cr(obj, cr); | ||
| 218 | |||
| 219 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | ||
| 220 | iommu_write_reg(obj, 1, MMU_LD_TLB); | ||
| 221 | } | ||
| 222 | |||
| 223 | /** | ||
| 224 | * iotlb_dump_cr - Dump an iommu tlb entry into buf | ||
| 225 | * @obj: target iommu | ||
| 226 | * @cr: contents of cam and ram register | ||
| 227 | * @buf: output buffer | ||
| 228 | **/ | ||
| 229 | static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, | ||
| 230 | char *buf) | ||
| 231 | { | ||
| 232 | BUG_ON(!cr || !buf); | ||
| 233 | |||
| 234 | return arch_iommu->dump_cr(obj, cr, buf); | ||
| 235 | } | ||
| 236 | |||
| 237 | /* only used in iotlb iteration for-loop */ | ||
| 238 | static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) | ||
| 239 | { | ||
| 240 | struct cr_regs cr; | ||
| 241 | struct iotlb_lock l; | ||
| 242 | |||
| 243 | iotlb_lock_get(obj, &l); | ||
| 244 | l.vict = n; | ||
| 245 | iotlb_lock_set(obj, &l); | ||
| 246 | iotlb_read_cr(obj, &cr); | ||
| 247 | |||
| 248 | return cr; | ||
| 249 | } | ||
| 250 | |||
| 251 | /** | ||
| 252 | * load_iotlb_entry - Set an iommu tlb entry | ||
| 253 | * @obj: target iommu | ||
| 254 | * @e: an iommu tlb entry info | ||
| 255 | **/ | ||
| 256 | #ifdef PREFETCH_IOTLB | ||
| 257 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
| 258 | { | ||
| 259 | int err = 0; | ||
| 260 | struct iotlb_lock l; | ||
| 261 | struct cr_regs *cr; | ||
| 262 | |||
| 263 | if (!obj || !obj->nr_tlb_entries || !e) | ||
| 264 | return -EINVAL; | ||
| 265 | |||
| 266 | clk_enable(obj->clk); | ||
| 267 | |||
| 268 | iotlb_lock_get(obj, &l); | ||
| 269 | if (l.base == obj->nr_tlb_entries) { | ||
| 270 | dev_warn(obj->dev, "%s: preserve entries full\n", __func__); | ||
| 271 | err = -EBUSY; | ||
| 272 | goto out; | ||
| 273 | } | ||
| 274 | if (!e->prsvd) { | ||
| 275 | int i; | ||
| 276 | struct cr_regs tmp; | ||
| 277 | |||
| 278 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp) | ||
| 279 | if (!iotlb_cr_valid(&tmp)) | ||
| 280 | break; | ||
| 281 | |||
| 282 | if (i == obj->nr_tlb_entries) { | ||
| 283 | dev_dbg(obj->dev, "%s: full: no entry\n", __func__); | ||
| 284 | err = -EBUSY; | ||
| 285 | goto out; | ||
| 286 | } | ||
| 287 | |||
| 288 | iotlb_lock_get(obj, &l); | ||
| 289 | } else { | ||
| 290 | l.vict = l.base; | ||
| 291 | iotlb_lock_set(obj, &l); | ||
| 292 | } | ||
| 293 | |||
| 294 | cr = iotlb_alloc_cr(obj, e); | ||
| 295 | if (IS_ERR(cr)) { | ||
| 296 | clk_disable(obj->clk); | ||
| 297 | return PTR_ERR(cr); | ||
| 298 | } | ||
| 299 | |||
| 300 | iotlb_load_cr(obj, cr); | ||
| 301 | kfree(cr); | ||
| 302 | |||
| 303 | if (e->prsvd) | ||
| 304 | l.base++; | ||
| 305 | /* increment victim for next tlb load */ | ||
| 306 | if (++l.vict == obj->nr_tlb_entries) | ||
| 307 | l.vict = l.base; | ||
| 308 | iotlb_lock_set(obj, &l); | ||
| 309 | out: | ||
| 310 | clk_disable(obj->clk); | ||
| 311 | return err; | ||
| 312 | } | ||
| 313 | |||
| 314 | #else /* !PREFETCH_IOTLB */ | ||
| 315 | |||
| 316 | static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
| 317 | { | ||
| 318 | return 0; | ||
| 319 | } | ||
| 320 | |||
| 321 | #endif /* !PREFETCH_IOTLB */ | ||
| 322 | |||
| 323 | static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
| 324 | { | ||
| 325 | return load_iotlb_entry(obj, e); | ||
| 326 | } | ||
| 327 | |||
| 328 | /** | ||
| 329 | * flush_iotlb_page - Clear an iommu tlb entry | ||
| 330 | * @obj: target iommu | ||
| 331 | * @da: iommu device virtual address | ||
| 332 | * | ||
| 333 | * Clear an iommu tlb entry which includes 'da' address. | ||
| 334 | **/ | ||
| 335 | static void flush_iotlb_page(struct omap_iommu *obj, u32 da) | ||
| 336 | { | ||
| 337 | int i; | ||
| 338 | struct cr_regs cr; | ||
| 339 | |||
| 340 | clk_enable(obj->clk); | ||
| 341 | |||
| 342 | for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { | ||
| 343 | u32 start; | ||
| 344 | size_t bytes; | ||
| 345 | |||
| 346 | if (!iotlb_cr_valid(&cr)) | ||
| 347 | continue; | ||
| 348 | |||
| 349 | start = iotlb_cr_to_virt(&cr); | ||
| 350 | bytes = iopgsz_to_bytes(cr.cam & 3); | ||
| 351 | |||
| 352 | if ((start <= da) && (da < start + bytes)) { | ||
| 353 | dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", | ||
| 354 | __func__, start, da, bytes); | ||
| 355 | iotlb_load_cr(obj, &cr); | ||
| 356 | iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); | ||
| 357 | } | ||
| 358 | } | ||
| 359 | clk_disable(obj->clk); | ||
| 360 | |||
| 361 | if (i == obj->nr_tlb_entries) | ||
| 362 | dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); | ||
| 363 | } | ||
| 364 | |||
| 365 | /** | ||
| 366 | * flush_iotlb_all - Clear all iommu tlb entries | ||
| 367 | * @obj: target iommu | ||
| 368 | **/ | ||
| 369 | static void flush_iotlb_all(struct omap_iommu *obj) | ||
| 370 | { | ||
| 371 | struct iotlb_lock l; | ||
| 372 | |||
| 373 | clk_enable(obj->clk); | ||
| 374 | |||
| 375 | l.base = 0; | ||
| 376 | l.vict = 0; | ||
| 377 | iotlb_lock_set(obj, &l); | ||
| 378 | |||
| 379 | iommu_write_reg(obj, 1, MMU_GFLUSH); | ||
| 380 | |||
| 381 | clk_disable(obj->clk); | ||
| 382 | } | ||
| 383 | |||
| 384 | #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) | ||
| 385 | |||
| 386 | ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) | ||
| 387 | { | ||
| 388 | if (!obj || !buf) | ||
| 389 | return -EINVAL; | ||
| 390 | |||
| 391 | clk_enable(obj->clk); | ||
| 392 | |||
| 393 | bytes = arch_iommu->dump_ctx(obj, buf, bytes); | ||
| 394 | |||
| 395 | clk_disable(obj->clk); | ||
| 396 | |||
| 397 | return bytes; | ||
| 398 | } | ||
| 399 | EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); | ||
| 400 | |||
| 401 | static int | ||
| 402 | __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) | ||
| 403 | { | ||
| 404 | int i; | ||
| 405 | struct iotlb_lock saved; | ||
| 406 | struct cr_regs tmp; | ||
| 407 | struct cr_regs *p = crs; | ||
| 408 | |||
| 409 | clk_enable(obj->clk); | ||
| 410 | iotlb_lock_get(obj, &saved); | ||
| 411 | |||
| 412 | for_each_iotlb_cr(obj, num, i, tmp) { | ||
| 413 | if (!iotlb_cr_valid(&tmp)) | ||
| 414 | continue; | ||
| 415 | *p++ = tmp; | ||
| 416 | } | ||
| 417 | |||
| 418 | iotlb_lock_set(obj, &saved); | ||
| 419 | clk_disable(obj->clk); | ||
| 420 | |||
| 421 | return p - crs; | ||
| 422 | } | ||
| 423 | |||
| 424 | /** | ||
| 425 | * omap_dump_tlb_entries - dump cr arrays to given buffer | ||
| 426 | * @obj: target iommu | ||
| 427 | * @buf: output buffer | ||
| 428 | **/ | ||
| 429 | size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) | ||
| 430 | { | ||
| 431 | int i, num; | ||
| 432 | struct cr_regs *cr; | ||
| 433 | char *p = buf; | ||
| 434 | |||
| 435 | num = bytes / sizeof(*cr); | ||
| 436 | num = min(obj->nr_tlb_entries, num); | ||
| 437 | |||
| 438 | cr = kcalloc(num, sizeof(*cr), GFP_KERNEL); | ||
| 439 | if (!cr) | ||
| 440 | return 0; | ||
| 441 | |||
| 442 | num = __dump_tlb_entries(obj, cr, num); | ||
| 443 | for (i = 0; i < num; i++) | ||
| 444 | p += iotlb_dump_cr(obj, cr + i, p); | ||
| 445 | kfree(cr); | ||
| 446 | |||
| 447 | return p - buf; | ||
| 448 | } | ||
| 449 | EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); | ||
| 450 | |||
| 451 | int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) | ||
| 452 | { | ||
| 453 | return driver_for_each_device(&omap_iommu_driver.driver, | ||
| 454 | NULL, data, fn); | ||
| 455 | } | ||
| 456 | EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); | ||
| 457 | |||
| 458 | #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ | ||
| 459 | |||
| 460 | /* | ||
| 461 | * H/W pagetable operations | ||
| 462 | */ | ||
| 463 | static void flush_iopgd_range(u32 *first, u32 *last) | ||
| 464 | { | ||
| 465 | /* FIXME: L2 cache should be taken care of if it exists */ | ||
| 466 | do { | ||
| 467 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" | ||
| 468 | : : "r" (first)); | ||
| 469 | first += L1_CACHE_BYTES / sizeof(*first); | ||
| 470 | } while (first <= last); | ||
| 471 | } | ||
| 472 | |||
| 473 | static void flush_iopte_range(u32 *first, u32 *last) | ||
| 474 | { | ||
| 475 | /* FIXME: L2 cache should be taken care of if it exists */ | ||
| 476 | do { | ||
| 477 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" | ||
| 478 | : : "r" (first)); | ||
| 479 | first += L1_CACHE_BYTES / sizeof(*first); | ||
| 480 | } while (first <= last); | ||
| 481 | } | ||
| 482 | |||
| 483 | static void iopte_free(u32 *iopte) | ||
| 484 | { | ||
| 485 | /* Note: freed iopte's must be clean ready for re-use */ | ||
| 486 | kmem_cache_free(iopte_cachep, iopte); | ||
| 487 | } | ||
| 488 | |||
| 489 | static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) | ||
| 490 | { | ||
| 491 | u32 *iopte; | ||
| 492 | |||
| 493 | /* a table has already existed */ | ||
| 494 | if (*iopgd) | ||
| 495 | goto pte_ready; | ||
| 496 | |||
| 497 | /* | ||
| 498 | * do the allocation outside the page table lock | ||
| 499 | */ | ||
| 500 | spin_unlock(&obj->page_table_lock); | ||
| 501 | iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL); | ||
| 502 | spin_lock(&obj->page_table_lock); | ||
| 503 | |||
| 504 | if (!*iopgd) { | ||
| 505 | if (!iopte) | ||
| 506 | return ERR_PTR(-ENOMEM); | ||
| 507 | |||
| 508 | *iopgd = virt_to_phys(iopte) | IOPGD_TABLE; | ||
| 509 | flush_iopgd_range(iopgd, iopgd); | ||
| 510 | |||
| 511 | dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); | ||
| 512 | } else { | ||
| 513 | /* We raced, free the redundant table */ | ||
| 514 | iopte_free(iopte); | ||
| 515 | } | ||
| 516 | |||
| 517 | pte_ready: | ||
| 518 | iopte = iopte_offset(iopgd, da); | ||
| 519 | |||
| 520 | dev_vdbg(obj->dev, | ||
| 521 | "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n", | ||
| 522 | __func__, da, iopgd, *iopgd, iopte, *iopte); | ||
| 523 | |||
| 524 | return iopte; | ||
| 525 | } | ||
| 526 | |||
| 527 | static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | ||
| 528 | { | ||
| 529 | u32 *iopgd = iopgd_offset(obj, da); | ||
| 530 | |||
| 531 | if ((da | pa) & ~IOSECTION_MASK) { | ||
| 532 | dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n", | ||
| 533 | __func__, da, pa, IOSECTION_SIZE); | ||
| 534 | return -EINVAL; | ||
| 535 | } | ||
| 536 | |||
| 537 | *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; | ||
| 538 | flush_iopgd_range(iopgd, iopgd); | ||
| 539 | return 0; | ||
| 540 | } | ||
| 541 | |||
| 542 | static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | ||
| 543 | { | ||
| 544 | u32 *iopgd = iopgd_offset(obj, da); | ||
| 545 | int i; | ||
| 546 | |||
| 547 | if ((da | pa) & ~IOSUPER_MASK) { | ||
| 548 | dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n", | ||
| 549 | __func__, da, pa, IOSUPER_SIZE); | ||
| 550 | return -EINVAL; | ||
| 551 | } | ||
| 552 | |||
| 553 | for (i = 0; i < 16; i++) | ||
| 554 | *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; | ||
| 555 | flush_iopgd_range(iopgd, iopgd + 15); | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 559 | static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | ||
| 560 | { | ||
| 561 | u32 *iopgd = iopgd_offset(obj, da); | ||
| 562 | u32 *iopte = iopte_alloc(obj, iopgd, da); | ||
| 563 | |||
| 564 | if (IS_ERR(iopte)) | ||
| 565 | return PTR_ERR(iopte); | ||
| 566 | |||
| 567 | *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; | ||
| 568 | flush_iopte_range(iopte, iopte); | ||
| 569 | |||
| 570 | dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", | ||
| 571 | __func__, da, pa, iopte, *iopte); | ||
| 572 | |||
| 573 | return 0; | ||
| 574 | } | ||
| 575 | |||
| 576 | static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) | ||
| 577 | { | ||
| 578 | u32 *iopgd = iopgd_offset(obj, da); | ||
| 579 | u32 *iopte = iopte_alloc(obj, iopgd, da); | ||
| 580 | int i; | ||
| 581 | |||
| 582 | if ((da | pa) & ~IOLARGE_MASK) { | ||
| 583 | dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n", | ||
| 584 | __func__, da, pa, IOLARGE_SIZE); | ||
| 585 | return -EINVAL; | ||
| 586 | } | ||
| 587 | |||
| 588 | if (IS_ERR(iopte)) | ||
| 589 | return PTR_ERR(iopte); | ||
| 590 | |||
| 591 | for (i = 0; i < 16; i++) | ||
| 592 | *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; | ||
| 593 | flush_iopte_range(iopte, iopte + 15); | ||
| 594 | return 0; | ||
| 595 | } | ||
| 596 | |||
| 597 | static int | ||
| 598 | iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) | ||
| 599 | { | ||
| 600 | int (*fn)(struct omap_iommu *, u32, u32, u32); | ||
| 601 | u32 prot; | ||
| 602 | int err; | ||
| 603 | |||
| 604 | if (!obj || !e) | ||
| 605 | return -EINVAL; | ||
| 606 | |||
| 607 | switch (e->pgsz) { | ||
| 608 | case MMU_CAM_PGSZ_16M: | ||
| 609 | fn = iopgd_alloc_super; | ||
| 610 | break; | ||
| 611 | case MMU_CAM_PGSZ_1M: | ||
| 612 | fn = iopgd_alloc_section; | ||
| 613 | break; | ||
| 614 | case MMU_CAM_PGSZ_64K: | ||
| 615 | fn = iopte_alloc_large; | ||
| 616 | break; | ||
| 617 | case MMU_CAM_PGSZ_4K: | ||
| 618 | fn = iopte_alloc_page; | ||
| 619 | break; | ||
| 620 | default: | ||
| 621 | fn = NULL; | ||
| 622 | BUG(); | ||
| 623 | break; | ||
| 624 | } | ||
| 625 | |||
| 626 | prot = get_iopte_attr(e); | ||
| 627 | |||
| 628 | spin_lock(&obj->page_table_lock); | ||
| 629 | err = fn(obj, e->da, e->pa, prot); | ||
| 630 | spin_unlock(&obj->page_table_lock); | ||
| 631 | |||
| 632 | return err; | ||
| 633 | } | ||
| 634 | |||
| 635 | /** | ||
| 636 | * omap_iopgtable_store_entry - Make an iommu pte entry | ||
| 637 | * @obj: target iommu | ||
| 638 | * @e: an iommu tlb entry info | ||
| 639 | **/ | ||
| 640 | int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) | ||
| 641 | { | ||
| 642 | int err; | ||
| 643 | |||
| 644 | flush_iotlb_page(obj, e->da); | ||
| 645 | err = iopgtable_store_entry_core(obj, e); | ||
| 646 | if (!err) | ||
| 647 | prefetch_iotlb_entry(obj, e); | ||
| 648 | return err; | ||
| 649 | } | ||
| 650 | EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); | ||
| 651 | |||
| 652 | /** | ||
| 653 | * iopgtable_lookup_entry - Lookup an iommu pte entry | ||
| 654 | * @obj: target iommu | ||
| 655 | * @da: iommu device virtual address | ||
| 656 | * @ppgd: iommu pgd entry pointer to be returned | ||
| 657 | * @ppte: iommu pte entry pointer to be returned | ||
| 658 | **/ | ||
| 659 | static void | ||
| 660 | iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte) | ||
| 661 | { | ||
| 662 | u32 *iopgd, *iopte = NULL; | ||
| 663 | |||
| 664 | iopgd = iopgd_offset(obj, da); | ||
| 665 | if (!*iopgd) | ||
| 666 | goto out; | ||
| 667 | |||
| 668 | if (iopgd_is_table(*iopgd)) | ||
| 669 | iopte = iopte_offset(iopgd, da); | ||
| 670 | out: | ||
| 671 | *ppgd = iopgd; | ||
| 672 | *ppte = iopte; | ||
| 673 | } | ||
| 674 | |||
| 675 | static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da) | ||
| 676 | { | ||
| 677 | size_t bytes; | ||
| 678 | u32 *iopgd = iopgd_offset(obj, da); | ||
| 679 | int nent = 1; | ||
| 680 | |||
| 681 | if (!*iopgd) | ||
| 682 | return 0; | ||
| 683 | |||
| 684 | if (iopgd_is_table(*iopgd)) { | ||
| 685 | int i; | ||
| 686 | u32 *iopte = iopte_offset(iopgd, da); | ||
| 687 | |||
| 688 | bytes = IOPTE_SIZE; | ||
| 689 | if (*iopte & IOPTE_LARGE) { | ||
| 690 | nent *= 16; | ||
| 691 | /* rewind to the 1st entry */ | ||
| 692 | iopte = iopte_offset(iopgd, (da & IOLARGE_MASK)); | ||
| 693 | } | ||
| 694 | bytes *= nent; | ||
| 695 | memset(iopte, 0, nent * sizeof(*iopte)); | ||
| 696 | flush_iopte_range(iopte, iopte + nent - 1); | ||
| 697 | |||
| 698 | /* | ||
| 699 | * do table walk to check if this table is necessary or not | ||
| 700 | */ | ||
| 701 | iopte = iopte_offset(iopgd, 0); | ||
| 702 | for (i = 0; i < PTRS_PER_IOPTE; i++) | ||
| 703 | if (iopte[i]) | ||
| 704 | goto out; | ||
| 705 | |||
| 706 | iopte_free(iopte); | ||
| 707 | nent = 1; /* for the next L1 entry */ | ||
| 708 | } else { | ||
| 709 | bytes = IOPGD_SIZE; | ||
| 710 | if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) { | ||
| 711 | nent *= 16; | ||
| 712 | /* rewind to the 1st entry */ | ||
| 713 | iopgd = iopgd_offset(obj, (da & IOSUPER_MASK)); | ||
| 714 | } | ||
| 715 | bytes *= nent; | ||
| 716 | } | ||
| 717 | memset(iopgd, 0, nent * sizeof(*iopgd)); | ||
| 718 | flush_iopgd_range(iopgd, iopgd + nent - 1); | ||
| 719 | out: | ||
| 720 | return bytes; | ||
| 721 | } | ||
| 722 | |||
| 723 | /** | ||
| 724 | * iopgtable_clear_entry - Remove an iommu pte entry | ||
| 725 | * @obj: target iommu | ||
| 726 | * @da: iommu device virtual address | ||
| 727 | **/ | ||
| 728 | static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da) | ||
| 729 | { | ||
| 730 | size_t bytes; | ||
| 731 | |||
| 732 | spin_lock(&obj->page_table_lock); | ||
| 733 | |||
| 734 | bytes = iopgtable_clear_entry_core(obj, da); | ||
| 735 | flush_iotlb_page(obj, da); | ||
| 736 | |||
| 737 | spin_unlock(&obj->page_table_lock); | ||
| 738 | |||
| 739 | return bytes; | ||
| 740 | } | ||
| 741 | |||
| 742 | static void iopgtable_clear_entry_all(struct omap_iommu *obj) | ||
| 743 | { | ||
| 744 | int i; | ||
| 745 | |||
| 746 | spin_lock(&obj->page_table_lock); | ||
| 747 | |||
| 748 | for (i = 0; i < PTRS_PER_IOPGD; i++) { | ||
| 749 | u32 da; | ||
| 750 | u32 *iopgd; | ||
| 751 | |||
| 752 | da = i << IOPGD_SHIFT; | ||
| 753 | iopgd = iopgd_offset(obj, da); | ||
| 754 | |||
| 755 | if (!*iopgd) | ||
| 756 | continue; | ||
| 757 | |||
| 758 | if (iopgd_is_table(*iopgd)) | ||
| 759 | iopte_free(iopte_offset(iopgd, 0)); | ||
| 760 | |||
| 761 | *iopgd = 0; | ||
| 762 | flush_iopgd_range(iopgd, iopgd); | ||
| 763 | } | ||
| 764 | |||
| 765 | flush_iotlb_all(obj); | ||
| 766 | |||
| 767 | spin_unlock(&obj->page_table_lock); | ||
| 768 | } | ||
| 769 | |||
| 770 | /* | ||
| 771 | * Device IOMMU generic operations | ||
| 772 | */ | ||
| 773 | static irqreturn_t iommu_fault_handler(int irq, void *data) | ||
| 774 | { | ||
| 775 | u32 da, errs; | ||
| 776 | u32 *iopgd, *iopte; | ||
| 777 | struct omap_iommu *obj = data; | ||
| 778 | struct iommu_domain *domain = obj->domain; | ||
| 779 | |||
| 780 | if (!obj->refcount) | ||
| 781 | return IRQ_NONE; | ||
| 782 | |||
| 783 | clk_enable(obj->clk); | ||
| 784 | errs = iommu_report_fault(obj, &da); | ||
| 785 | clk_disable(obj->clk); | ||
| 786 | if (errs == 0) | ||
| 787 | return IRQ_HANDLED; | ||
| 788 | |||
| 789 | /* Fault callback or TLB/PTE Dynamic loading */ | ||
| 790 | if (!report_iommu_fault(domain, obj->dev, da, 0)) | ||
| 791 | return IRQ_HANDLED; | ||
| 792 | |||
| 793 | iommu_disable(obj); | ||
| 794 | |||
| 795 | iopgd = iopgd_offset(obj, da); | ||
| 796 | |||
| 797 | if (!iopgd_is_table(*iopgd)) { | ||
| 798 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " | ||
| 799 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); | ||
| 800 | return IRQ_NONE; | ||
| 801 | } | ||
| 802 | |||
| 803 | iopte = iopte_offset(iopgd, da); | ||
| 804 | |||
| 805 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " | ||
| 806 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, | ||
| 807 | iopte, *iopte); | ||
| 808 | |||
| 809 | return IRQ_NONE; | ||
| 810 | } | ||
| 811 | |||
| 812 | static int device_match_by_alias(struct device *dev, void *data) | ||
| 813 | { | ||
| 814 | struct omap_iommu *obj = to_iommu(dev); | ||
| 815 | const char *name = data; | ||
| 816 | |||
| 817 | pr_debug("%s: %s %s\n", __func__, obj->name, name); | ||
| 818 | |||
| 819 | return strcmp(obj->name, name) == 0; | ||
| 820 | } | ||
| 821 | |||
| 822 | /** | ||
| 823 | * omap_find_iommu_device() - find an omap iommu device by name | ||
| 824 | * @name: name of the iommu device | ||
| 825 | * | ||
| 826 | * The generic iommu API requires the caller to provide the device | ||
| 827 | * it wishes to attach to a certain iommu domain. | ||
| 828 | * | ||
| 829 | * Drivers generally should not bother with this as it should just | ||
| 830 | * be taken care of by the DMA-API using dev_archdata. | ||
| 831 | * | ||
| 832 | * This function is provided as an interim solution until the latter | ||
| 833 | * materializes, and omap3isp is fully migrated to the DMA-API. | ||
| 834 | */ | ||
| 835 | struct device *omap_find_iommu_device(const char *name) | ||
| 836 | { | ||
| 837 | return driver_find_device(&omap_iommu_driver.driver, NULL, | ||
| 838 | (void *)name, | ||
| 839 | device_match_by_alias); | ||
| 840 | } | ||
| 841 | EXPORT_SYMBOL_GPL(omap_find_iommu_device); | ||
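A client-side sketch of this interim lookup (error handling trimmed; the "isp" alias and the platform-bus registration are assumptions based on the rest of this series):

    static struct iommu_domain *foo_attach_isp(void)
    {
            struct device *iommu_dev;
            struct iommu_domain *domain;

            iommu_dev = omap_find_iommu_device("isp");
            if (!iommu_dev)
                    return NULL;

            domain = iommu_domain_alloc(&platform_bus_type);
            if (!domain)
                    return NULL;

            /* on OMAP, the iommu device itself is what gets attached */
            if (iommu_attach_device(domain, iommu_dev)) {
                    iommu_domain_free(domain);
                    return NULL;
            }

            return domain;
    }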
| 842 | |||
| 843 | /** | ||
| 844 | * omap_iommu_attach() - attach iommu device to an iommu domain | ||
| 845 | * @dev: target omap iommu device | ||
| 846 | * @iopgd: page table | ||
| 847 | **/ | ||
| 848 | static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd) | ||
| 849 | { | ||
| 850 | int err = -ENOMEM; | ||
| 851 | struct omap_iommu *obj = to_iommu(dev); | ||
| 852 | |||
| 853 | spin_lock(&obj->iommu_lock); | ||
| 854 | |||
| 855 | /* an iommu device can only be attached once */ | ||
| 856 | if (++obj->refcount > 1) { | ||
| 857 | dev_err(dev, "%s: already attached!\n", obj->name); | ||
| 858 | err = -EBUSY; | ||
| 859 | goto err_enable; | ||
| 860 | } | ||
| 861 | |||
| 862 | obj->iopgd = iopgd; | ||
| 863 | err = iommu_enable(obj); | ||
| 864 | if (err) | ||
| 865 | goto err_enable; | ||
| 866 | flush_iotlb_all(obj); | ||
| 867 | |||
| 868 | if (!try_module_get(obj->owner)) | ||
| 869 | goto err_module; | ||
| 870 | |||
| 871 | spin_unlock(&obj->iommu_lock); | ||
| 872 | |||
| 873 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | ||
| 874 | return obj; | ||
| 875 | |||
| 876 | err_module: | ||
| 877 | if (obj->refcount == 1) | ||
| 878 | iommu_disable(obj); | ||
| 879 | err_enable: | ||
| 880 | obj->refcount--; | ||
| 881 | spin_unlock(&obj->iommu_lock); | ||
| 882 | return ERR_PTR(err); | ||
| 883 | } | ||
| 884 | |||
| 885 | /** | ||
| 886 | * omap_iommu_detach - release iommu device | ||
| 887 | * @obj: target iommu | ||
| 888 | **/ | ||
| 889 | static void omap_iommu_detach(struct omap_iommu *obj) | ||
| 890 | { | ||
| 891 | if (!obj || IS_ERR(obj)) | ||
| 892 | return; | ||
| 893 | |||
| 894 | spin_lock(&obj->iommu_lock); | ||
| 895 | |||
| 896 | if (--obj->refcount == 0) | ||
| 897 | iommu_disable(obj); | ||
| 898 | |||
| 899 | module_put(obj->owner); | ||
| 900 | |||
| 901 | obj->iopgd = NULL; | ||
| 902 | |||
| 903 | spin_unlock(&obj->iommu_lock); | ||
| 904 | |||
| 905 | dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); | ||
| 906 | } | ||
| 907 | |||
| 908 | /* | ||
| 909 | * OMAP Device MMU(IOMMU) detection | ||
| 910 | */ | ||
| 911 | static int __devinit omap_iommu_probe(struct platform_device *pdev) | ||
| 912 | { | ||
| 913 | int err = -ENODEV; | ||
| 914 | int irq; | ||
| 915 | struct omap_iommu *obj; | ||
| 916 | struct resource *res; | ||
| 917 | struct iommu_platform_data *pdata = pdev->dev.platform_data; | ||
| 918 | |||
| 919 | if (pdev->num_resources != 2) | ||
| 920 | return -EINVAL; | ||
| 921 | |||
| 922 | obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); | ||
| 923 | if (!obj) | ||
| 924 | return -ENOMEM; | ||
| 925 | |||
| 926 | obj->clk = clk_get(&pdev->dev, pdata->clk_name); | ||
| 927 | if (IS_ERR(obj->clk)) | ||
| 928 | goto err_clk; | ||
| 929 | |||
| 930 | obj->nr_tlb_entries = pdata->nr_tlb_entries; | ||
| 931 | obj->name = pdata->name; | ||
| 932 | obj->dev = &pdev->dev; | ||
| 933 | obj->ctx = (void *)obj + sizeof(*obj); | ||
| 934 | obj->da_start = pdata->da_start; | ||
| 935 | obj->da_end = pdata->da_end; | ||
| 936 | |||
| 937 | spin_lock_init(&obj->iommu_lock); | ||
| 938 | mutex_init(&obj->mmap_lock); | ||
| 939 | spin_lock_init(&obj->page_table_lock); | ||
| 940 | INIT_LIST_HEAD(&obj->mmap); | ||
| 941 | |||
| 942 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 943 | if (!res) { | ||
| 944 | err = -ENODEV; | ||
| 945 | goto err_mem; | ||
| 946 | } | ||
| 947 | |||
| 948 | res = request_mem_region(res->start, resource_size(res), | ||
| 949 | dev_name(&pdev->dev)); | ||
| 950 | if (!res) { | ||
| 951 | err = -EIO; | ||
| 952 | goto err_mem; | ||
| 953 | } | ||
| 954 | |||
| 955 | obj->regbase = ioremap(res->start, resource_size(res)); | ||
| 956 | if (!obj->regbase) { | ||
| 957 | err = -ENOMEM; | ||
| 958 | goto err_ioremap; | ||
| 959 | } | ||
| 960 | |||
| 961 | irq = platform_get_irq(pdev, 0); | ||
| 962 | if (irq < 0) { | ||
| 963 | err = -ENODEV; | ||
| 964 | goto err_irq; | ||
| 965 | } | ||
| 966 | err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, | ||
| 967 | dev_name(&pdev->dev), obj); | ||
| 968 | if (err < 0) | ||
| 969 | goto err_irq; | ||
| 970 | platform_set_drvdata(pdev, obj); | ||
| 971 | |||
| 972 | dev_info(&pdev->dev, "%s registered\n", obj->name); | ||
| 973 | return 0; | ||
| 974 | |||
| 975 | err_irq: | ||
| 976 | iounmap(obj->regbase); | ||
| 977 | err_ioremap: | ||
| 978 | release_mem_region(res->start, resource_size(res)); | ||
| 979 | err_mem: | ||
| 980 | clk_put(obj->clk); | ||
| 981 | err_clk: | ||
| 982 | kfree(obj); | ||
| 983 | return err; | ||
| 984 | } | ||
| 985 | |||
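For context, the probe above consumes an iommu_platform_data instance supplied by platform code. A hypothetical board-level definition — the field names are the ones the probe reads; the values are illustrative guesses, not taken from this patch:

    static struct iommu_platform_data isp_iommu_pdata = {
	    .name		= "isp",
	    .clk_name		= "cam_ick",	/* handed to clk_get() above */
	    .nr_tlb_entries	= 8,
	    .da_start		= 0x0,
	    .da_end		= 0xfffff000,
    };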
| 986 | static int __devexit omap_iommu_remove(struct platform_device *pdev) | ||
| 987 | { | ||
| 988 | int irq; | ||
| 989 | struct resource *res; | ||
| 990 | struct omap_iommu *obj = platform_get_drvdata(pdev); | ||
| 991 | |||
| 992 | platform_set_drvdata(pdev, NULL); | ||
| 993 | |||
| 994 | iopgtable_clear_entry_all(obj); | ||
| 995 | |||
| 996 | irq = platform_get_irq(pdev, 0); | ||
| 997 | free_irq(irq, obj); | ||
| 998 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 999 | release_mem_region(res->start, resource_size(res)); | ||
| 1000 | iounmap(obj->regbase); | ||
| 1001 | |||
| 1002 | clk_put(obj->clk); | ||
| 1003 | dev_info(&pdev->dev, "%s removed\n", obj->name); | ||
| 1004 | kfree(obj); | ||
| 1005 | return 0; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | static struct platform_driver omap_iommu_driver = { | ||
| 1009 | .probe = omap_iommu_probe, | ||
| 1010 | .remove = __devexit_p(omap_iommu_remove), | ||
| 1011 | .driver = { | ||
| 1012 | .name = "omap-iommu", | ||
| 1013 | }, | ||
| 1014 | }; | ||
| 1015 | |||
| 1016 | static void iopte_cachep_ctor(void *iopte) | ||
| 1017 | { | ||
| 1018 | clean_dcache_area(iopte, IOPTE_TABLE_SIZE); | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, | ||
| 1022 | phys_addr_t pa, int order, int prot) | ||
| 1023 | { | ||
| 1024 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1025 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
| 1026 | struct device *dev = oiommu->dev; | ||
| 1027 | size_t bytes = PAGE_SIZE << order; | ||
| 1028 | struct iotlb_entry e; | ||
| 1029 | int omap_pgsz; | ||
| 1030 | u32 ret, flags; | ||
| 1031 | |||
| 1032 | /* we only support mapping a single iommu page for now */ | ||
| 1033 | omap_pgsz = bytes_to_iopgsz(bytes); | ||
| 1034 | if (omap_pgsz < 0) { | ||
| 1035 | dev_err(dev, "invalid size to map: %zu\n", bytes); | ||
| 1036 | return -EINVAL; | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes); | ||
| 1040 | |||
| 1041 | flags = omap_pgsz | prot; | ||
| 1042 | |||
| 1043 | iotlb_init_entry(&e, da, pa, flags); | ||
| 1044 | |||
| 1045 | ret = omap_iopgtable_store_entry(oiommu, &e); | ||
| 1046 | if (ret) | ||
| 1047 | dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret); | ||
| 1048 | |||
| 1049 | return ret; | ||
| 1050 | } | ||
| 1051 | |||
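The generic API hands this callback an order (log2 of 4 KiB pages), so only four sizes survive bytes_to_iopgsz(). A hand-worked table — the MMU_CAM_PGSZ_* names are assumed from plat/iopgtable.h, which this driver builds against:

    /*
     *   order 0  -> 4 KiB  -> MMU_CAM_PGSZ_4K
     *   order 4  -> 64 KiB -> MMU_CAM_PGSZ_64K
     *   order 8  -> 1 MiB  -> MMU_CAM_PGSZ_1M
     *   order 12 -> 16 MiB -> MMU_CAM_PGSZ_16M
     *   any other order    -> bytes_to_iopgsz() < 0 -> -EINVAL
     */
    size_t bytes = PAGE_SIZE << 4;		/* 64 KiB */
    int omap_pgsz = bytes_to_iopgsz(bytes);	/* MMU_CAM_PGSZ_64K */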
| 1052 | static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, | ||
| 1053 | int order) | ||
| 1054 | { | ||
| 1055 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1056 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
| 1057 | struct device *dev = oiommu->dev; | ||
| 1058 | size_t unmap_size; | ||
| 1059 | |||
| 1060 | dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order); | ||
| 1061 | |||
| 1062 | unmap_size = iopgtable_clear_entry(oiommu, da); | ||
| 1063 | |||
| 1064 | return unmap_size ? get_order(unmap_size) : -EINVAL; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | static int | ||
| 1068 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | ||
| 1069 | { | ||
| 1070 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1071 | struct omap_iommu *oiommu; | ||
| 1072 | int ret = 0; | ||
| 1073 | |||
| 1074 | spin_lock(&omap_domain->lock); | ||
| 1075 | |||
| 1076 | /* only a single device is supported per domain for now */ | ||
| 1077 | if (omap_domain->iommu_dev) { | ||
| 1078 | dev_err(dev, "iommu domain is already attached\n"); | ||
| 1079 | ret = -EBUSY; | ||
| 1080 | goto out; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | /* get a handle to and enable the omap iommu */ | ||
| 1084 | oiommu = omap_iommu_attach(dev, omap_domain->pgtable); | ||
| 1085 | if (IS_ERR(oiommu)) { | ||
| 1086 | ret = PTR_ERR(oiommu); | ||
| 1087 | dev_err(dev, "can't get omap iommu: %d\n", ret); | ||
| 1088 | goto out; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | omap_domain->iommu_dev = oiommu; | ||
| 1092 | oiommu->domain = domain; | ||
| 1093 | |||
| 1094 | out: | ||
| 1095 | spin_unlock(&omap_domain->lock); | ||
| 1096 | return ret; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | static void omap_iommu_detach_dev(struct iommu_domain *domain, | ||
| 1100 | struct device *dev) | ||
| 1101 | { | ||
| 1102 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1103 | struct omap_iommu *oiommu = to_iommu(dev); | ||
| 1104 | |||
| 1105 | spin_lock(&omap_domain->lock); | ||
| 1106 | |||
| 1107 | /* only a single device is supported per domain for now */ | ||
| 1108 | if (omap_domain->iommu_dev != oiommu) { | ||
| 1109 | dev_err(dev, "invalid iommu device\n"); | ||
| 1110 | goto out; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | iopgtable_clear_entry_all(oiommu); | ||
| 1114 | |||
| 1115 | omap_iommu_detach(oiommu); | ||
| 1116 | |||
| 1117 | omap_domain->iommu_dev = NULL; | ||
| 1118 | |||
| 1119 | out: | ||
| 1120 | spin_unlock(&omap_domain->lock); | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | static int omap_iommu_domain_init(struct iommu_domain *domain) | ||
| 1124 | { | ||
| 1125 | struct omap_iommu_domain *omap_domain; | ||
| 1126 | |||
| 1127 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); | ||
| 1128 | if (!omap_domain) { | ||
| 1129 | pr_err("kzalloc failed\n"); | ||
| 1130 | goto out; | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL); | ||
| 1134 | if (!omap_domain->pgtable) { | ||
| 1135 | pr_err("kzalloc failed\n"); | ||
| 1136 | goto fail_nomem; | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | /* | ||
| 1140 | * should never fail, but please keep this around to ensure | ||
| 1141 | * we keep the hardware happy | ||
| 1142 | */ | ||
| 1143 | BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)); | ||
| 1144 | |||
| 1145 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); | ||
| 1146 | spin_lock_init(&omap_domain->lock); | ||
| 1147 | |||
| 1148 | domain->priv = omap_domain; | ||
| 1149 | |||
| 1150 | return 0; | ||
| 1151 | |||
| 1152 | fail_nomem: | ||
| 1153 | kfree(omap_domain); | ||
| 1154 | out: | ||
| 1155 | return -ENOMEM; | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | /* assume device was already detached */ | ||
| 1159 | static void omap_iommu_domain_destroy(struct iommu_domain *domain) | ||
| 1160 | { | ||
| 1161 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1162 | |||
| 1163 | domain->priv = NULL; | ||
| 1164 | |||
| 1165 | kfree(omap_domain->pgtable); | ||
| 1166 | kfree(omap_domain); | ||
| 1167 | } | ||
| 1168 | |||
| 1169 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | ||
| 1170 | unsigned long da) | ||
| 1171 | { | ||
| 1172 | struct omap_iommu_domain *omap_domain = domain->priv; | ||
| 1173 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | ||
| 1174 | struct device *dev = oiommu->dev; | ||
| 1175 | u32 *pgd, *pte; | ||
| 1176 | phys_addr_t ret = 0; | ||
| 1177 | |||
| 1178 | iopgtable_lookup_entry(oiommu, da, &pgd, &pte); | ||
| 1179 | |||
| 1180 | if (pte) { | ||
| 1181 | if (iopte_is_small(*pte)) | ||
| 1182 | ret = omap_iommu_translate(*pte, da, IOPTE_MASK); | ||
| 1183 | else if (iopte_is_large(*pte)) | ||
| 1184 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); | ||
| 1185 | else | ||
| 1186 | dev_err(dev, "bogus pte 0x%x", *pte); | ||
| 1187 | } else { | ||
| 1188 | if (iopgd_is_section(*pgd)) | ||
| 1189 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); | ||
| 1190 | else if (iopgd_is_super(*pgd)) | ||
| 1191 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); | ||
| 1192 | else | ||
| 1193 | dev_err(dev, "bogus pgd 0x%x", *pgd); | ||
| 1194 | } | ||
| 1195 | |||
| 1196 | return ret; | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | static int omap_iommu_domain_has_cap(struct iommu_domain *domain, | ||
| 1200 | unsigned long cap) | ||
| 1201 | { | ||
| 1202 | return 0; | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | static struct iommu_ops omap_iommu_ops = { | ||
| 1206 | .domain_init = omap_iommu_domain_init, | ||
| 1207 | .domain_destroy = omap_iommu_domain_destroy, | ||
| 1208 | .attach_dev = omap_iommu_attach_dev, | ||
| 1209 | .detach_dev = omap_iommu_detach_dev, | ||
| 1210 | .map = omap_iommu_map, | ||
| 1211 | .unmap = omap_iommu_unmap, | ||
| 1212 | .iova_to_phys = omap_iommu_iova_to_phys, | ||
| 1213 | .domain_has_cap = omap_iommu_domain_has_cap, | ||
| 1214 | }; | ||
| 1215 | |||
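Once bus_set_iommu() below wires these ops into the platform bus, clients go through the generic entry points. A sketch with hypothetical addresses; note that in this version of the API the flags word is passed straight through to the OMAP page-table code, as map_iovm_area() in the iovmm file below relies on:

    u32 da = 0x20000000;		/* hypothetical device virtual address */
    phys_addr_t pa = 0x80000000;	/* hypothetical, 1 MiB aligned */
    int err;

    err = iommu_map(domain, da, pa, get_order(SZ_1M), flags);
    if (!err)
	    iommu_unmap(domain, da, get_order(SZ_1M));	/* returns unmapped order */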
| 1216 | static int __init omap_iommu_init(void) | ||
| 1217 | { | ||
| 1218 | struct kmem_cache *p; | ||
| 1219 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
| 1220 | size_t align = 1 << 10; /* L2 pagetable alignment */ | ||
| 1221 | |||
| 1222 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | ||
| 1223 | iopte_cachep_ctor); | ||
| 1224 | if (!p) | ||
| 1225 | return -ENOMEM; | ||
| 1226 | iopte_cachep = p; | ||
| 1227 | |||
| 1228 | bus_set_iommu(&platform_bus_type, &omap_iommu_ops); | ||
| 1229 | |||
| 1230 | return platform_driver_register(&omap_iommu_driver); | ||
| 1231 | } | ||
| 1232 | module_init(omap_iommu_init); | ||
| 1233 | |||
| 1234 | static void __exit omap_iommu_exit(void) | ||
| 1235 | { | ||
| 1236 | kmem_cache_destroy(iopte_cachep); | ||
| 1237 | |||
| 1238 | platform_driver_unregister(&omap_iommu_driver); | ||
| 1239 | } | ||
| 1240 | module_exit(omap_iommu_exit); | ||
| 1241 | |||
| 1242 | MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives"); | ||
| 1243 | MODULE_ALIAS("platform:omap-iommu"); | ||
| 1244 | MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); | ||
| 1245 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c new file mode 100644 index 000000000000..e8fdb8830f69 --- /dev/null +++ b/drivers/iommu/omap-iovmm.c | |||
| @@ -0,0 +1,742 @@ | |||
| 1 | /* | ||
| 2 | * omap iommu: simple virtual address space management | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009 Nokia Corporation | ||
| 5 | * | ||
| 6 | * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/slab.h> | ||
| 15 | #include <linux/vmalloc.h> | ||
| 16 | #include <linux/device.h> | ||
| 17 | #include <linux/scatterlist.h> | ||
| 18 | #include <linux/iommu.h> | ||
| 19 | |||
| 20 | #include <asm/cacheflush.h> | ||
| 21 | #include <asm/mach/map.h> | ||
| 22 | |||
| 23 | #include <plat/iommu.h> | ||
| 24 | #include <plat/iovmm.h> | ||
| 25 | |||
| 26 | #include <plat/iopgtable.h> | ||
| 27 | |||
| 28 | static struct kmem_cache *iovm_area_cachep; | ||
| 29 | |||
| 30 | /* return the offset of the first scatterlist entry in a sg table */ | ||
| 31 | static unsigned int sgtable_offset(const struct sg_table *sgt) | ||
| 32 | { | ||
| 33 | if (!sgt || !sgt->nents) | ||
| 34 | return 0; | ||
| 35 | |||
| 36 | return sgt->sgl->offset; | ||
| 37 | } | ||
| 38 | |||
| 39 | /* return total bytes of sg buffers */ | ||
| 40 | static size_t sgtable_len(const struct sg_table *sgt) | ||
| 41 | { | ||
| 42 | unsigned int i, total = 0; | ||
| 43 | struct scatterlist *sg; | ||
| 44 | |||
| 45 | if (!sgt) | ||
| 46 | return 0; | ||
| 47 | |||
| 48 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 49 | size_t bytes; | ||
| 50 | |||
| 51 | bytes = sg->length + sg->offset; | ||
| 52 | |||
| 53 | if (!iopgsz_ok(bytes)) { | ||
| 54 | pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n", | ||
| 55 | __func__, i, bytes, sg->offset); | ||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | if (i && sg->offset) { | ||
| 60 | pr_err("%s: sg[%d] offset not allowed in internal entries\n", | ||
| 61 | __func__, i); | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | |||
| 65 | total += bytes; | ||
| 66 | } | ||
| 67 | |||
| 68 | return total; | ||
| 69 | } | ||
| 70 | #define sgtable_ok(x) (!!sgtable_len(x)) | ||
| 71 | |||
| 72 | static unsigned max_alignment(u32 addr) | ||
| 73 | { | ||
| 74 | int i; | ||
| 75 | unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; | ||
| 76 | for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) | ||
| 77 | ; | ||
| 78 | return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0; | ||
| 79 | } | ||
| 80 | |||
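A few hand-computed values make the loop concrete:

    /*
     *   max_alignment(0x01000000) -> SZ_16M
     *   max_alignment(0x00100000) -> SZ_1M
     *   max_alignment(0x00010000) -> SZ_64K
     *   max_alignment(0x00001000) -> SZ_4K
     *   max_alignment(0x00000800) -> 0	(not even 4 KiB aligned)
     */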
| 81 | /* | ||
| 82 | * calculate the optimal number of sg elements from total bytes based on | ||
| 83 | * iommu superpages | ||
| 84 | */ | ||
| 85 | static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) | ||
| 86 | { | ||
| 87 | unsigned nr_entries = 0, ent_sz; | ||
| 88 | |||
| 89 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) { | ||
| 90 | pr_err("%s: wrong size %08zx\n", __func__, bytes); | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | while (bytes) { | ||
| 95 | ent_sz = max_alignment(da | pa); | ||
| 96 | ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); | ||
| 97 | nr_entries++; | ||
| 98 | da += ent_sz; | ||
| 99 | pa += ent_sz; | ||
| 100 | bytes -= ent_sz; | ||
| 101 | } | ||
| 102 | |||
| 103 | return nr_entries; | ||
| 104 | } | ||
| 105 | |||
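A hand-worked run with illustrative addresses (not from the patch): a linear region of 0x111000 bytes at da = pa = 0x40100000 is split greedily, each step taking the largest entry both the current alignment and the remaining length allow:

    /*
     *   0x40100000: aligned to SZ_1M,  iopgsz_max(0x111000) = SZ_1M  -> 1 MiB
     *   0x40200000: aligned to SZ_1M,  iopgsz_max(0x011000) = SZ_64K -> 64 KiB
     *   0x40210000: aligned to SZ_64K, iopgsz_max(0x001000) = SZ_4K  -> 4 KiB
     *
     * so sgtable_nents() returns 3.
     */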
| 106 | /* allocate and initialize sg_table header (a kind of 'superblock') */ | ||
| 107 | static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, | ||
| 108 | u32 da, u32 pa) | ||
| 109 | { | ||
| 110 | unsigned int nr_entries; | ||
| 111 | int err; | ||
| 112 | struct sg_table *sgt; | ||
| 113 | |||
| 114 | if (!bytes) | ||
| 115 | return ERR_PTR(-EINVAL); | ||
| 116 | |||
| 117 | if (!IS_ALIGNED(bytes, PAGE_SIZE)) | ||
| 118 | return ERR_PTR(-EINVAL); | ||
| 119 | |||
| 120 | if (flags & IOVMF_LINEAR) { | ||
| 121 | nr_entries = sgtable_nents(bytes, da, pa); | ||
| 122 | if (!nr_entries) | ||
| 123 | return ERR_PTR(-EINVAL); | ||
| 124 | } else | ||
| 125 | nr_entries = bytes / PAGE_SIZE; | ||
| 126 | |||
| 127 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | ||
| 128 | if (!sgt) | ||
| 129 | return ERR_PTR(-ENOMEM); | ||
| 130 | |||
| 131 | err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); | ||
| 132 | if (err) { | ||
| 133 | kfree(sgt); | ||
| 134 | return ERR_PTR(err); | ||
| 135 | } | ||
| 136 | |||
| 137 | pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); | ||
| 138 | |||
| 139 | return sgt; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* free sg_table header (a kind of superblock) */ | ||
| 143 | static void sgtable_free(struct sg_table *sgt) | ||
| 144 | { | ||
| 145 | if (!sgt) | ||
| 146 | return; | ||
| 147 | |||
| 148 | sg_free_table(sgt); | ||
| 149 | kfree(sgt); | ||
| 150 | |||
| 151 | pr_debug("%s: sgt:%p\n", __func__, sgt); | ||
| 152 | } | ||
| 153 | |||
| 154 | /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ | ||
| 155 | static void *vmap_sg(const struct sg_table *sgt) | ||
| 156 | { | ||
| 157 | u32 va; | ||
| 158 | size_t total; | ||
| 159 | unsigned int i; | ||
| 160 | struct scatterlist *sg; | ||
| 161 | struct vm_struct *new; | ||
| 162 | const struct mem_type *mtype; | ||
| 163 | |||
| 164 | mtype = get_mem_type(MT_DEVICE); | ||
| 165 | if (!mtype) | ||
| 166 | return ERR_PTR(-EINVAL); | ||
| 167 | |||
| 168 | total = sgtable_len(sgt); | ||
| 169 | if (!total) | ||
| 170 | return ERR_PTR(-EINVAL); | ||
| 171 | |||
| 172 | new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); | ||
| 173 | if (!new) | ||
| 174 | return ERR_PTR(-ENOMEM); | ||
| 175 | va = (u32)new->addr; | ||
| 176 | |||
| 177 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 178 | size_t bytes; | ||
| 179 | u32 pa; | ||
| 180 | int err; | ||
| 181 | |||
| 182 | pa = sg_phys(sg) - sg->offset; | ||
| 183 | bytes = sg->length + sg->offset; | ||
| 184 | |||
| 185 | BUG_ON(bytes != PAGE_SIZE); | ||
| 186 | |||
| 187 | err = ioremap_page(va, pa, mtype); | ||
| 188 | if (err) | ||
| 189 | goto err_out; | ||
| 190 | |||
| 191 | va += bytes; | ||
| 192 | } | ||
| 193 | |||
| 194 | flush_cache_vmap((unsigned long)new->addr, | ||
| 195 | (unsigned long)(new->addr + total)); | ||
| 196 | return new->addr; | ||
| 197 | |||
| 198 | err_out: | ||
| 199 | WARN_ON(1); /* FIXME: cleanup some mpu mappings */ | ||
| 200 | vunmap(new->addr); | ||
| 201 | return ERR_PTR(-EAGAIN); | ||
| 202 | } | ||
| 203 | |||
| 204 | static inline void vunmap_sg(const void *va) | ||
| 205 | { | ||
| 206 | vunmap(va); | ||
| 207 | } | ||
| 208 | |||
| 209 | static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, | ||
| 210 | const u32 da) | ||
| 211 | { | ||
| 212 | struct iovm_struct *tmp; | ||
| 213 | |||
| 214 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
| 215 | if ((da >= tmp->da_start) && (da < tmp->da_end)) { | ||
| 216 | size_t len; | ||
| 217 | |||
| 218 | len = tmp->da_end - tmp->da_start; | ||
| 219 | |||
| 220 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", | ||
| 221 | __func__, tmp->da_start, da, tmp->da_end, len, | ||
| 222 | tmp->flags); | ||
| 223 | |||
| 224 | return tmp; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 228 | return NULL; | ||
| 229 | } | ||
| 230 | |||
| 231 | /** | ||
| 232 | * omap_find_iovm_area - find iovma which includes @da | ||
| 233 | * @da: iommu device virtual address | ||
| 234 | * | ||
| 235 | * Find the existing iovma starting at @da | ||
| 236 | */ | ||
| 237 | struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da) | ||
| 238 | { | ||
| 239 | struct iovm_struct *area; | ||
| 240 | |||
| 241 | mutex_lock(&obj->mmap_lock); | ||
| 242 | area = __find_iovm_area(obj, da); | ||
| 243 | mutex_unlock(&obj->mmap_lock); | ||
| 244 | |||
| 245 | return area; | ||
| 246 | } | ||
| 247 | EXPORT_SYMBOL_GPL(omap_find_iovm_area); | ||
| 248 | |||
| 249 | /* | ||
| 250 | * This finds the hole (area) which fits the requested address and len | ||
| 251 | * in the iovma mmap list, and returns the newly allocated iovma. | ||
| 252 | */ | ||
| 253 | static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, | ||
| 254 | size_t bytes, u32 flags) | ||
| 255 | { | ||
| 256 | struct iovm_struct *new, *tmp; | ||
| 257 | u32 start, prev_end, alignment; | ||
| 258 | |||
| 259 | if (!obj || !bytes) | ||
| 260 | return ERR_PTR(-EINVAL); | ||
| 261 | |||
| 262 | start = da; | ||
| 263 | alignment = PAGE_SIZE; | ||
| 264 | |||
| 265 | if (~flags & IOVMF_DA_FIXED) { | ||
| 266 | /* Don't map address 0 */ | ||
| 267 | start = obj->da_start ? obj->da_start : alignment; | ||
| 268 | |||
| 269 | if (flags & IOVMF_LINEAR) | ||
| 270 | alignment = iopgsz_max(bytes); | ||
| 271 | start = roundup(start, alignment); | ||
| 272 | } else if (start < obj->da_start || start > obj->da_end || | ||
| 273 | obj->da_end - start < bytes) { | ||
| 274 | return ERR_PTR(-EINVAL); | ||
| 275 | } | ||
| 276 | |||
| 277 | tmp = NULL; | ||
| 278 | if (list_empty(&obj->mmap)) | ||
| 279 | goto found; | ||
| 280 | |||
| 281 | prev_end = 0; | ||
| 282 | list_for_each_entry(tmp, &obj->mmap, list) { | ||
| 283 | |||
| 284 | if (prev_end > start) | ||
| 285 | break; | ||
| 286 | |||
| 287 | if (tmp->da_start > start && (tmp->da_start - start) >= bytes) | ||
| 288 | goto found; | ||
| 289 | |||
| 290 | if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) | ||
| 291 | start = roundup(tmp->da_end + 1, alignment); | ||
| 292 | |||
| 293 | prev_end = tmp->da_end; | ||
| 294 | } | ||
| 295 | |||
| 296 | if ((start >= prev_end) && (obj->da_end - start >= bytes)) | ||
| 297 | goto found; | ||
| 298 | |||
| 299 | dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", | ||
| 300 | __func__, da, bytes, flags); | ||
| 301 | |||
| 302 | return ERR_PTR(-EINVAL); | ||
| 303 | |||
| 304 | found: | ||
| 305 | new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); | ||
| 306 | if (!new) | ||
| 307 | return ERR_PTR(-ENOMEM); | ||
| 308 | |||
| 309 | new->iommu = obj; | ||
| 310 | new->da_start = start; | ||
| 311 | new->da_end = start + bytes; | ||
| 312 | new->flags = flags; | ||
| 313 | |||
| 314 | /* | ||
| 315 | * keep ascending order of iovmas | ||
| 316 | */ | ||
| 317 | if (tmp) | ||
| 318 | list_add_tail(&new->list, &tmp->list); | ||
| 319 | else | ||
| 320 | list_add(&new->list, &obj->mmap); | ||
| 321 | |||
| 322 | dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", | ||
| 323 | __func__, new->da_start, start, new->da_end, bytes, flags); | ||
| 324 | |||
| 325 | return new; | ||
| 326 | } | ||
| 327 | |||
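The allocator above is a first-fit walk over the address-sorted iovma list. A hand-worked run with hypothetical numbers:

    /*
     * da_start = 0, existing areas [0x1000,0x5000) and [0x9000,0xb000),
     * non-fixed request for 0x4000 bytes, PAGE_SIZE alignment:
     *
     *   start = 0x1000; area 1 covers it  -> start = roundup(0x5001) = 0x6000
     *   hole before 0x9000 is only 0x3000 -> too small, keep walking
     *   area 2 ends at 0xb000 >= start    -> start = roundup(0xb001) = 0xc000
     *
     * the tail check then places the new iovma at [0xc000,0x10000).
     */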
| 328 | static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) | ||
| 329 | { | ||
| 330 | size_t bytes; | ||
| 331 | |||
| 332 | BUG_ON(!obj || !area); | ||
| 333 | |||
| 334 | bytes = area->da_end - area->da_start; | ||
| 335 | |||
| 336 | dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", | ||
| 337 | __func__, area->da_start, area->da_end, bytes, area->flags); | ||
| 338 | |||
| 339 | list_del(&area->list); | ||
| 340 | kmem_cache_free(iovm_area_cachep, area); | ||
| 341 | } | ||
| 342 | |||
| 343 | /** | ||
| 344 | * omap_da_to_va - convert (d) to (v) | ||
| 345 | * @obj: objective iommu | ||
| 346 | * @da: iommu device virtual address | ||
| 348 | * | ||
| 349 | * Returns mpu virtual addr which corresponds to a given device virtual addr | ||
| 350 | */ | ||
| 351 | void *omap_da_to_va(struct omap_iommu *obj, u32 da) | ||
| 352 | { | ||
| 353 | void *va = NULL; | ||
| 354 | struct iovm_struct *area; | ||
| 355 | |||
| 356 | mutex_lock(&obj->mmap_lock); | ||
| 357 | |||
| 358 | area = __find_iovm_area(obj, da); | ||
| 359 | if (!area) { | ||
| 360 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
| 361 | goto out; | ||
| 362 | } | ||
| 363 | va = area->va; | ||
| 364 | out: | ||
| 365 | mutex_unlock(&obj->mmap_lock); | ||
| 366 | |||
| 367 | return va; | ||
| 368 | } | ||
| 369 | EXPORT_SYMBOL_GPL(omap_da_to_va); | ||
| 370 | |||
| 371 | static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) | ||
| 372 | { | ||
| 373 | unsigned int i; | ||
| 374 | struct scatterlist *sg; | ||
| 375 | void *va = _va; | ||
| 377 | |||
| 378 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 379 | struct page *pg; | ||
| 380 | const size_t bytes = PAGE_SIZE; | ||
| 381 | |||
| 382 | /* | ||
| 383 | * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' | ||
| 384 | */ | ||
| 385 | pg = vmalloc_to_page(va); | ||
| 386 | BUG_ON(!pg); | ||
| 387 | sg_set_page(sg, pg, bytes, 0); | ||
| 388 | |||
| 389 | va += bytes; | ||
| 390 | } | ||
| 393 | } | ||
| 394 | |||
| 395 | static inline void sgtable_drain_vmalloc(struct sg_table *sgt) | ||
| 396 | { | ||
| 397 | /* | ||
| 398 | * Actually this is not necessary at all; it just exists for | ||
| 399 | * consistency and code readability. | ||
| 400 | */ | ||
| 401 | BUG_ON(!sgt); | ||
| 402 | } | ||
| 403 | |||
| 404 | /* create 'da' <-> 'pa' mapping from 'sgt' */ | ||
| 405 | static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, | ||
| 406 | const struct sg_table *sgt, u32 flags) | ||
| 407 | { | ||
| 408 | int err; | ||
| 409 | unsigned int i, j; | ||
| 410 | struct scatterlist *sg; | ||
| 411 | u32 da = new->da_start; | ||
| 412 | int order; | ||
| 413 | |||
| 414 | if (!domain || !sgt) | ||
| 415 | return -EINVAL; | ||
| 416 | |||
| 417 | BUG_ON(!sgtable_ok(sgt)); | ||
| 418 | |||
| 419 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 420 | u32 pa; | ||
| 421 | size_t bytes; | ||
| 422 | |||
| 423 | pa = sg_phys(sg) - sg->offset; | ||
| 424 | bytes = sg->length + sg->offset; | ||
| 425 | |||
| 426 | flags &= ~IOVMF_PGSZ_MASK; | ||
| 427 | |||
| 428 | if (bytes_to_iopgsz(bytes) < 0) | ||
| 429 | goto err_out; | ||
| 430 | |||
| 431 | order = get_order(bytes); | ||
| 432 | |||
| 433 | pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, | ||
| 434 | i, da, pa, bytes); | ||
| 435 | |||
| 436 | err = iommu_map(domain, da, pa, order, flags); | ||
| 437 | if (err) | ||
| 438 | goto err_out; | ||
| 439 | |||
| 440 | da += bytes; | ||
| 441 | } | ||
| 442 | return 0; | ||
| 443 | |||
| 444 | err_out: | ||
| 445 | da = new->da_start; | ||
| 446 | |||
| 447 | for_each_sg(sgt->sgl, sg, i, j) { | ||
| 448 | size_t bytes; | ||
| 449 | |||
| 450 | bytes = sg->length + sg->offset; | ||
| 451 | order = get_order(bytes); | ||
| 452 | |||
| 453 | /* ignore failures.. we're already handling one */ | ||
| 454 | iommu_unmap(domain, da, order); | ||
| 455 | |||
| 456 | da += bytes; | ||
| 457 | } | ||
| 458 | return err; | ||
| 459 | } | ||
| 460 | |||
| 461 | /* release 'da' <-> 'pa' mapping */ | ||
| 462 | static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, | ||
| 463 | struct iovm_struct *area) | ||
| 464 | { | ||
| 465 | u32 start; | ||
| 466 | size_t total = area->da_end - area->da_start; | ||
| 467 | const struct sg_table *sgt = area->sgt; | ||
| 468 | struct scatterlist *sg; | ||
| 469 | int i, err; | ||
| 470 | |||
| 471 | BUG_ON(!sgtable_ok(sgt)); | ||
| 472 | BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); | ||
| 473 | |||
| 474 | start = area->da_start; | ||
| 475 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 476 | size_t bytes; | ||
| 477 | int order; | ||
| 478 | |||
| 479 | bytes = sg->length + sg->offset; | ||
| 480 | order = get_order(bytes); | ||
| 481 | |||
| 482 | err = iommu_unmap(domain, start, order); | ||
| 483 | if (err < 0) | ||
| 484 | break; | ||
| 485 | |||
| 486 | dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", | ||
| 487 | __func__, start, bytes, area->flags); | ||
| 488 | |||
| 489 | BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); | ||
| 490 | |||
| 491 | total -= bytes; | ||
| 492 | start += bytes; | ||
| 493 | } | ||
| 494 | BUG_ON(total); | ||
| 495 | } | ||
| 496 | |||
| 497 | /* template function for all unmapping */ | ||
| 498 | static struct sg_table *unmap_vm_area(struct iommu_domain *domain, | ||
| 499 | struct omap_iommu *obj, const u32 da, | ||
| 500 | void (*fn)(const void *), u32 flags) | ||
| 501 | { | ||
| 502 | struct sg_table *sgt = NULL; | ||
| 503 | struct iovm_struct *area; | ||
| 504 | |||
| 505 | if (!IS_ALIGNED(da, PAGE_SIZE)) { | ||
| 506 | dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); | ||
| 507 | return NULL; | ||
| 508 | } | ||
| 509 | |||
| 510 | mutex_lock(&obj->mmap_lock); | ||
| 511 | |||
| 512 | area = __find_iovm_area(obj, da); | ||
| 513 | if (!area) { | ||
| 514 | dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); | ||
| 515 | goto out; | ||
| 516 | } | ||
| 517 | |||
| 518 | if ((area->flags & flags) != flags) { | ||
| 519 | dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, | ||
| 520 | area->flags); | ||
| 521 | goto out; | ||
| 522 | } | ||
| 523 | sgt = (struct sg_table *)area->sgt; | ||
| 524 | |||
| 525 | unmap_iovm_area(domain, obj, area); | ||
| 526 | |||
| 527 | fn(area->va); | ||
| 528 | |||
| 529 | dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, | ||
| 530 | area->da_start, da, area->da_end, | ||
| 531 | area->da_end - area->da_start, area->flags); | ||
| 532 | |||
| 533 | free_iovm_area(obj, area); | ||
| 534 | out: | ||
| 535 | mutex_unlock(&obj->mmap_lock); | ||
| 536 | |||
| 537 | return sgt; | ||
| 538 | } | ||
| 539 | |||
| 540 | static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, | ||
| 541 | u32 da, const struct sg_table *sgt, void *va, | ||
| 542 | size_t bytes, u32 flags) | ||
| 543 | { | ||
| 544 | int err = -ENOMEM; | ||
| 545 | struct iovm_struct *new; | ||
| 546 | |||
| 547 | mutex_lock(&obj->mmap_lock); | ||
| 548 | |||
| 549 | new = alloc_iovm_area(obj, da, bytes, flags); | ||
| 550 | if (IS_ERR(new)) { | ||
| 551 | err = PTR_ERR(new); | ||
| 552 | goto err_alloc_iovma; | ||
| 553 | } | ||
| 554 | new->va = va; | ||
| 555 | new->sgt = sgt; | ||
| 556 | |||
| 557 | if (map_iovm_area(domain, new, sgt, new->flags)) | ||
| 558 | goto err_map; | ||
| 559 | |||
| 560 | mutex_unlock(&obj->mmap_lock); | ||
| 561 | |||
| 562 | dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", | ||
| 563 | __func__, new->da_start, bytes, new->flags, va); | ||
| 564 | |||
| 565 | return new->da_start; | ||
| 566 | |||
| 567 | err_map: | ||
| 568 | free_iovm_area(obj, new); | ||
| 569 | err_alloc_iovma: | ||
| 570 | mutex_unlock(&obj->mmap_lock); | ||
| 571 | return err; | ||
| 572 | } | ||
| 573 | |||
| 574 | static inline u32 | ||
| 575 | __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, | ||
| 576 | u32 da, const struct sg_table *sgt, | ||
| 577 | void *va, size_t bytes, u32 flags) | ||
| 578 | { | ||
| 579 | return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); | ||
| 580 | } | ||
| 581 | |||
| 582 | /** | ||
| 583 | * omap_iommu_vmap - (d)-(p)-(v) address mapper | ||
| 584 | * @obj: objective iommu | ||
| 585 | * @sgt: address of scatter gather table | ||
| 586 | * @flags: iovma and page property | ||
| 587 | * | ||
| 588 | * Creates a 1-n-1 mapping with the given @sgt and returns @da. | ||
| 589 | * All @sgt elements must be io page size aligned. | ||
| 590 | */ | ||
| 591 | u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, | ||
| 592 | const struct sg_table *sgt, u32 flags) | ||
| 593 | { | ||
| 594 | size_t bytes; | ||
| 595 | void *va = NULL; | ||
| 596 | |||
| 597 | if (!obj || !obj->dev || !sgt) | ||
| 598 | return -EINVAL; | ||
| 599 | |||
| 600 | bytes = sgtable_len(sgt); | ||
| 601 | if (!bytes) | ||
| 602 | return -EINVAL; | ||
| 603 | bytes = PAGE_ALIGN(bytes); | ||
| 604 | |||
| 605 | if (flags & IOVMF_MMIO) { | ||
| 606 | va = vmap_sg(sgt); | ||
| 607 | if (IS_ERR(va)) | ||
| 608 | return PTR_ERR(va); | ||
| 609 | } | ||
| 610 | |||
| 611 | flags |= IOVMF_DISCONT; | ||
| 612 | flags |= IOVMF_MMIO; | ||
| 613 | |||
| 614 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
| 615 | if (IS_ERR_VALUE(da)) | ||
| 616 | vunmap_sg(va); | ||
| 617 | |||
| 618 | return da + sgtable_offset(sgt); | ||
| 619 | } | ||
| 620 | EXPORT_SYMBOL_GPL(omap_iommu_vmap); | ||
| 621 | |||
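Typical use, mirroring the ispmmu_vmap() hunk in ispvideo.c at the end of this patch; isp->domain, isp->iommu and IOMMU_FLAG are the omap3isp driver's names, and sgt is a caller-built scatter-gather table:

    u32 da;

    da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
    if (IS_ERR_VALUE(da))
	    return -ENOMEM;
    /* ... program 'da' into the device; when done: */
    sgt = omap_iommu_vunmap(isp->domain, isp->iommu, da);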
| 622 | /** | ||
| 623 | * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' | ||
| 624 | * @obj: objective iommu | ||
| 625 | * @da: iommu device virtual address | ||
| 626 | * | ||
| 627 | * Free the iommu virtually contiguous memory area starting at | ||
| 628 | * @da, which was returned by 'omap_iommu_vmap()'. | ||
| 629 | */ | ||
| 630 | struct sg_table * | ||
| 631 | omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da) | ||
| 632 | { | ||
| 633 | struct sg_table *sgt; | ||
| 634 | /* | ||
| 635 | * 'sgt' is allocated by the caller before 'omap_iommu_vmap()' is | ||
| 636 | * called. Just return 'sgt' to the caller to free. | ||
| 637 | */ | ||
| 638 | da &= PAGE_MASK; | ||
| 639 | sgt = unmap_vm_area(domain, obj, da, vunmap_sg, | ||
| 640 | IOVMF_DISCONT | IOVMF_MMIO); | ||
| 641 | if (!sgt) | ||
| 642 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
| 643 | return sgt; | ||
| 644 | } | ||
| 645 | EXPORT_SYMBOL_GPL(omap_iommu_vunmap); | ||
| 646 | |||
| 647 | /** | ||
| 648 | * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper | ||
| 649 | * @obj: objective iommu | ||
| 650 | * @da: contiguous iommu virtual memory | ||
| 651 | * @bytes: allocation size | ||
| 652 | * @flags: iovma and page property | ||
| 653 | * | ||
| 654 | * Allocates @bytes linearly, creates a 1-n-1 mapping, and returns | ||
| 655 | * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set. | ||
| 656 | */ | ||
| 657 | u32 | ||
| 658 | omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, | ||
| 659 | size_t bytes, u32 flags) | ||
| 660 | { | ||
| 661 | void *va; | ||
| 662 | struct sg_table *sgt; | ||
| 663 | |||
| 664 | if (!obj || !obj->dev || !bytes) | ||
| 665 | return -EINVAL; | ||
| 666 | |||
| 667 | bytes = PAGE_ALIGN(bytes); | ||
| 668 | |||
| 669 | va = vmalloc(bytes); | ||
| 670 | if (!va) | ||
| 671 | return -ENOMEM; | ||
| 672 | |||
| 673 | flags |= IOVMF_DISCONT; | ||
| 674 | flags |= IOVMF_ALLOC; | ||
| 675 | |||
| 676 | sgt = sgtable_alloc(bytes, flags, da, 0); | ||
| 677 | if (IS_ERR(sgt)) { | ||
| 678 | da = PTR_ERR(sgt); | ||
| 679 | goto err_sgt_alloc; | ||
| 680 | } | ||
| 681 | sgtable_fill_vmalloc(sgt, va); | ||
| 682 | |||
| 683 | da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); | ||
| 684 | if (IS_ERR_VALUE(da)) | ||
| 685 | goto err_iommu_vmap; | ||
| 686 | |||
| 687 | return da; | ||
| 688 | |||
| 689 | err_iommu_vmap: | ||
| 690 | sgtable_drain_vmalloc(sgt); | ||
| 691 | sgtable_free(sgt); | ||
| 692 | err_sgt_alloc: | ||
| 693 | vfree(va); | ||
| 694 | return da; | ||
| 695 | } | ||
| 696 | EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); | ||
| 697 | |||
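Typical pairing, as the ispccdc.c hunks below use it: allocate memory the device can see, fill it through its MPU-side mapping, and release both mappings in one call (names again from the omap3isp driver; src and size are placeholders):

    u32 table;

    table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, size, IOMMU_FLAG);
    if (IS_ERR_VALUE(table))
	    return -ENOMEM;

    memcpy(omap_da_to_va(isp->iommu, table), src, size);	/* fill via MPU va */
    /* ... device reads 'table' through its own MMU ... */
    omap_iommu_vfree(isp->domain, isp->iommu, table);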
| 698 | /** | ||
| 699 | * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' | ||
| 700 | * @obj: objective iommu | ||
| 701 | * @da: iommu device virtual address | ||
| 702 | * | ||
| 703 | * Frees the iommu virtually contiguous memory area starting at | ||
| 704 | * @da, as obtained from 'omap_iommu_vmalloc()'. | ||
| 705 | */ | ||
| 706 | void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj, | ||
| 707 | const u32 da) | ||
| 708 | { | ||
| 709 | struct sg_table *sgt; | ||
| 710 | |||
| 711 | sgt = unmap_vm_area(domain, obj, da, vfree, | ||
| 712 | IOVMF_DISCONT | IOVMF_ALLOC); | ||
| 713 | if (!sgt) | ||
| 714 | dev_dbg(obj->dev, "%s: No sgt\n", __func__); | ||
| 715 | sgtable_free(sgt); | ||
| 716 | } | ||
| 717 | EXPORT_SYMBOL_GPL(omap_iommu_vfree); | ||
| 718 | |||
| 719 | static int __init iovmm_init(void) | ||
| 720 | { | ||
| 721 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | ||
| 722 | struct kmem_cache *p; | ||
| 723 | |||
| 724 | p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, | ||
| 725 | flags, NULL); | ||
| 726 | if (!p) | ||
| 727 | return -ENOMEM; | ||
| 728 | iovm_area_cachep = p; | ||
| 729 | |||
| 730 | return 0; | ||
| 731 | } | ||
| 732 | module_init(iovmm_init); | ||
| 733 | |||
| 734 | static void __exit iovmm_exit(void) | ||
| 735 | { | ||
| 736 | kmem_cache_destroy(iovm_area_cachep); | ||
| 737 | } | ||
| 738 | module_exit(iovmm_exit); | ||
| 739 | |||
| 740 | MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); | ||
| 741 | MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); | ||
| 742 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index f574dc012cad..620106937ec6 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
| @@ -763,8 +763,7 @@ source "drivers/media/video/m5mols/Kconfig" | |||
| 763 | 763 | ||
| 764 | config VIDEO_OMAP3 | 764 | config VIDEO_OMAP3 |
| 765 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" | 765 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" |
| 766 | select OMAP_IOMMU | 766 | depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL |
| 767 | depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL | ||
| 768 | ---help--- | 767 | ---help--- |
| 769 | Driver for an OMAP 3 camera controller. | 768 | Driver for an OMAP 3 camera controller. |
| 770 | 769 | ||
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c index 5cea2bbd7014..a7ed98596883 100644 --- a/drivers/media/video/omap3isp/isp.c +++ b/drivers/media/video/omap3isp/isp.c | |||
| @@ -80,6 +80,13 @@ | |||
| 80 | #include "isph3a.h" | 80 | #include "isph3a.h" |
| 81 | #include "isphist.h" | 81 | #include "isphist.h" |
| 82 | 82 | ||
| 83 | /* | ||
| 84 | * this is provided as an interim solution until omap3isp no longer | ||
| 85 | * needs any omap-specific iommu API | ||
| 86 | */ | ||
| 87 | #define to_iommu(dev) \ | ||
| 88 | (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) | ||
| 89 | |||
| 83 | static unsigned int autoidle; | 90 | static unsigned int autoidle; |
| 84 | module_param(autoidle, int, 0444); | 91 | module_param(autoidle, int, 0444); |
| 85 | MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support"); | 92 | MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support"); |
| @@ -1108,7 +1115,7 @@ static void isp_save_ctx(struct isp_device *isp) | |||
| 1108 | { | 1115 | { |
| 1109 | isp_save_context(isp, isp_reg_list); | 1116 | isp_save_context(isp, isp_reg_list); |
| 1110 | if (isp->iommu) | 1117 | if (isp->iommu) |
| 1111 | iommu_save_ctx(isp->iommu); | 1118 | omap_iommu_save_ctx(isp->iommu); |
| 1112 | } | 1119 | } |
| 1113 | 1120 | ||
| 1114 | /* | 1121 | /* |
| @@ -1122,7 +1129,7 @@ static void isp_restore_ctx(struct isp_device *isp) | |||
| 1122 | { | 1129 | { |
| 1123 | isp_restore_context(isp, isp_reg_list); | 1130 | isp_restore_context(isp, isp_reg_list); |
| 1124 | if (isp->iommu) | 1131 | if (isp->iommu) |
| 1125 | iommu_restore_ctx(isp->iommu); | 1132 | omap_iommu_restore_ctx(isp->iommu); |
| 1126 | omap3isp_ccdc_restore_context(isp); | 1133 | omap3isp_ccdc_restore_context(isp); |
| 1127 | omap3isp_preview_restore_context(isp); | 1134 | omap3isp_preview_restore_context(isp); |
| 1128 | } | 1135 | } |
| @@ -1975,7 +1982,8 @@ static int isp_remove(struct platform_device *pdev) | |||
| 1975 | isp_cleanup_modules(isp); | 1982 | isp_cleanup_modules(isp); |
| 1976 | 1983 | ||
| 1977 | omap3isp_get(isp); | 1984 | omap3isp_get(isp); |
| 1978 | iommu_put(isp->iommu); | 1985 | iommu_detach_device(isp->domain, isp->iommu_dev); |
| 1986 | iommu_domain_free(isp->domain); | ||
| 1979 | omap3isp_put(isp); | 1987 | omap3isp_put(isp); |
| 1980 | 1988 | ||
| 1981 | free_irq(isp->irq_num, isp); | 1989 | free_irq(isp->irq_num, isp); |
| @@ -2123,25 +2131,41 @@ static int isp_probe(struct platform_device *pdev) | |||
| 2123 | } | 2131 | } |
| 2124 | 2132 | ||
| 2125 | /* IOMMU */ | 2133 | /* IOMMU */ |
| 2126 | isp->iommu = iommu_get("isp"); | 2134 | isp->iommu_dev = omap_find_iommu_device("isp"); |
| 2127 | if (IS_ERR_OR_NULL(isp->iommu)) { | 2135 | if (!isp->iommu_dev) { |
| 2128 | isp->iommu = NULL; | 2136 | dev_err(isp->dev, "omap_find_iommu_device failed\n"); |
| 2129 | ret = -ENODEV; | 2137 | ret = -ENODEV; |
| 2130 | goto error_isp; | 2138 | goto error_isp; |
| 2131 | } | 2139 | } |
| 2132 | 2140 | ||
| 2141 | /* to be removed once iommu migration is complete */ | ||
| 2142 | isp->iommu = to_iommu(isp->iommu_dev); | ||
| 2143 | |||
| 2144 | isp->domain = iommu_domain_alloc(pdev->dev.bus); | ||
| 2145 | if (!isp->domain) { | ||
| 2146 | dev_err(isp->dev, "can't alloc iommu domain\n"); | ||
| 2147 | ret = -ENOMEM; | ||
| 2148 | goto error_isp; | ||
| 2149 | } | ||
| 2150 | |||
| 2151 | ret = iommu_attach_device(isp->domain, isp->iommu_dev); | ||
| 2152 | if (ret) { | ||
| 2153 | dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret); | ||
| 2154 | goto free_domain; | ||
| 2155 | } | ||
| 2156 | |||
| 2133 | /* Interrupt */ | 2157 | /* Interrupt */ |
| 2134 | isp->irq_num = platform_get_irq(pdev, 0); | 2158 | isp->irq_num = platform_get_irq(pdev, 0); |
| 2135 | if (isp->irq_num <= 0) { | 2159 | if (isp->irq_num <= 0) { |
| 2136 | dev_err(isp->dev, "No IRQ resource\n"); | 2160 | dev_err(isp->dev, "No IRQ resource\n"); |
| 2137 | ret = -ENODEV; | 2161 | ret = -ENODEV; |
| 2138 | goto error_isp; | 2162 | goto detach_dev; |
| 2139 | } | 2163 | } |
| 2140 | 2164 | ||
| 2141 | if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) { | 2165 | if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) { |
| 2142 | dev_err(isp->dev, "Unable to request IRQ\n"); | 2166 | dev_err(isp->dev, "Unable to request IRQ\n"); |
| 2143 | ret = -EINVAL; | 2167 | ret = -EINVAL; |
| 2144 | goto error_isp; | 2168 | goto detach_dev; |
| 2145 | } | 2169 | } |
| 2146 | 2170 | ||
| 2147 | /* Entities */ | 2171 | /* Entities */ |
| @@ -2162,8 +2186,11 @@ error_modules: | |||
| 2162 | isp_cleanup_modules(isp); | 2186 | isp_cleanup_modules(isp); |
| 2163 | error_irq: | 2187 | error_irq: |
| 2164 | free_irq(isp->irq_num, isp); | 2188 | free_irq(isp->irq_num, isp); |
| 2189 | detach_dev: | ||
| 2190 | iommu_detach_device(isp->domain, isp->iommu_dev); | ||
| 2191 | free_domain: | ||
| 2192 | iommu_domain_free(isp->domain); | ||
| 2165 | error_isp: | 2193 | error_isp: |
| 2166 | iommu_put(isp->iommu); | ||
| 2167 | omap3isp_put(isp); | 2194 | omap3isp_put(isp); |
| 2168 | error: | 2195 | error: |
| 2169 | isp_put_clocks(isp); | 2196 | isp_put_clocks(isp); |
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h index 529e582ef948..81fdd85deb60 100644 --- a/drivers/media/video/omap3isp/isp.h +++ b/drivers/media/video/omap3isp/isp.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
| 33 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
| 34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
| 35 | #include <linux/iommu.h> | ||
| 35 | #include <plat/iommu.h> | 36 | #include <plat/iommu.h> |
| 36 | #include <plat/iovmm.h> | 37 | #include <plat/iovmm.h> |
| 37 | 38 | ||
| @@ -294,7 +295,9 @@ struct isp_device { | |||
| 294 | unsigned int sbl_resources; | 295 | unsigned int sbl_resources; |
| 295 | unsigned int subclk_resources; | 296 | unsigned int subclk_resources; |
| 296 | 297 | ||
| 297 | struct iommu *iommu; | 298 | struct omap_iommu *iommu; |
| 299 | struct iommu_domain *domain; | ||
| 300 | struct device *iommu_dev; | ||
| 298 | 301 | ||
| 299 | struct isp_platform_callback platform_cb; | 302 | struct isp_platform_callback platform_cb; |
| 300 | }; | 303 | }; |
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 80796eb0c53e..892671922f8a 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
| @@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc, | |||
| 366 | dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, | 366 | dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, |
| 367 | req->iovm->sgt->nents, DMA_TO_DEVICE); | 367 | req->iovm->sgt->nents, DMA_TO_DEVICE); |
| 368 | if (req->table) | 368 | if (req->table) |
| 369 | iommu_vfree(isp->iommu, req->table); | 369 | omap_iommu_vfree(isp->domain, isp->iommu, req->table); |
| 370 | kfree(req); | 370 | kfree(req); |
| 371 | } | 371 | } |
| 372 | 372 | ||
| @@ -438,15 +438,15 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, | |||
| 438 | 438 | ||
| 439 | req->enable = 1; | 439 | req->enable = 1; |
| 440 | 440 | ||
| 441 | req->table = iommu_vmalloc(isp->iommu, 0, req->config.size, | 441 | req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, |
| 442 | IOMMU_FLAG); | 442 | req->config.size, IOMMU_FLAG); |
| 443 | if (IS_ERR_VALUE(req->table)) { | 443 | if (IS_ERR_VALUE(req->table)) { |
| 444 | req->table = 0; | 444 | req->table = 0; |
| 445 | ret = -ENOMEM; | 445 | ret = -ENOMEM; |
| 446 | goto done; | 446 | goto done; |
| 447 | } | 447 | } |
| 448 | 448 | ||
| 449 | req->iovm = find_iovm_area(isp->iommu, req->table); | 449 | req->iovm = omap_find_iovm_area(isp->iommu, req->table); |
| 450 | if (req->iovm == NULL) { | 450 | if (req->iovm == NULL) { |
| 451 | ret = -ENOMEM; | 451 | ret = -ENOMEM; |
| 452 | goto done; | 452 | goto done; |
| @@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc, | |||
| 462 | dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, | 462 | dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, |
| 463 | req->iovm->sgt->nents, DMA_TO_DEVICE); | 463 | req->iovm->sgt->nents, DMA_TO_DEVICE); |
| 464 | 464 | ||
| 465 | table = da_to_va(isp->iommu, req->table); | 465 | table = omap_da_to_va(isp->iommu, req->table); |
| 466 | if (copy_from_user(table, config->lsc, req->config.size)) { | 466 | if (copy_from_user(table, config->lsc, req->config.size)) { |
| 467 | ret = -EFAULT; | 467 | ret = -EFAULT; |
| 468 | goto done; | 468 | goto done; |
| @@ -731,18 +731,19 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, | |||
| 731 | 731 | ||
| 732 | /* | 732 | /* |
| 733 | * table_new must be 64-bytes aligned, but it's | 733 | * table_new must be 64-bytes aligned, but it's |
| 734 | * already done by iommu_vmalloc(). | 734 | * already done by omap_iommu_vmalloc(). |
| 735 | */ | 735 | */ |
| 736 | size = ccdc->fpc.fpnum * 4; | 736 | size = ccdc->fpc.fpnum * 4; |
| 737 | table_new = iommu_vmalloc(isp->iommu, 0, size, | 737 | table_new = omap_iommu_vmalloc(isp->domain, isp->iommu, |
| 738 | IOMMU_FLAG); | 738 | 0, size, IOMMU_FLAG); |
| 739 | if (IS_ERR_VALUE(table_new)) | 739 | if (IS_ERR_VALUE(table_new)) |
| 740 | return -ENOMEM; | 740 | return -ENOMEM; |
| 741 | 741 | ||
| 742 | if (copy_from_user(da_to_va(isp->iommu, table_new), | 742 | if (copy_from_user(omap_da_to_va(isp->iommu, table_new), |
| 743 | (__force void __user *) | 743 | (__force void __user *) |
| 744 | ccdc->fpc.fpcaddr, size)) { | 744 | ccdc->fpc.fpcaddr, size)) { |
| 745 | iommu_vfree(isp->iommu, table_new); | 745 | omap_iommu_vfree(isp->domain, isp->iommu, |
| 746 | table_new); | ||
| 746 | return -EFAULT; | 747 | return -EFAULT; |
| 747 | } | 748 | } |
| 748 | 749 | ||
| @@ -752,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc, | |||
| 752 | 753 | ||
| 753 | ccdc_configure_fpc(ccdc); | 754 | ccdc_configure_fpc(ccdc); |
| 754 | if (table_old != 0) | 755 | if (table_old != 0) |
| 755 | iommu_vfree(isp->iommu, table_old); | 756 | omap_iommu_vfree(isp->domain, isp->iommu, table_old); |
| 756 | } | 757 | } |
| 757 | 758 | ||
| 758 | return ccdc_lsc_config(ccdc, ccdc_struct); | 759 | return ccdc_lsc_config(ccdc, ccdc_struct); |
| @@ -2287,5 +2288,5 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp) | |||
| 2287 | ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); | 2288 | ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); |
| 2288 | 2289 | ||
| 2289 | if (ccdc->fpc.fpcaddr != 0) | 2290 | if (ccdc->fpc.fpcaddr != 0) |
| 2290 | iommu_vfree(isp->iommu, ccdc->fpc.fpcaddr); | 2291 | omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr); |
| 2291 | } | 2292 | } |
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c index 808065948ac1..732905552261 100644 --- a/drivers/media/video/omap3isp/ispstat.c +++ b/drivers/media/video/omap3isp/ispstat.c | |||
| @@ -366,7 +366,8 @@ static void isp_stat_bufs_free(struct ispstat *stat) | |||
| 366 | dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, | 366 | dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, |
| 367 | buf->iovm->sgt->nents, | 367 | buf->iovm->sgt->nents, |
| 368 | DMA_FROM_DEVICE); | 368 | DMA_FROM_DEVICE); |
| 369 | iommu_vfree(isp->iommu, buf->iommu_addr); | 369 | omap_iommu_vfree(isp->domain, isp->iommu, |
| 370 | buf->iommu_addr); | ||
| 370 | } else { | 371 | } else { |
| 371 | if (!buf->virt_addr) | 372 | if (!buf->virt_addr) |
| 372 | continue; | 373 | continue; |
| @@ -399,8 +400,8 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
| 399 | struct iovm_struct *iovm; | 400 | struct iovm_struct *iovm; |
| 400 | 401 | ||
| 401 | WARN_ON(buf->dma_addr); | 402 | WARN_ON(buf->dma_addr); |
| 402 | buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size, | 403 | buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0, |
| 403 | IOMMU_FLAG); | 404 | size, IOMMU_FLAG); |
| 404 | if (IS_ERR((void *)buf->iommu_addr)) { | 405 | if (IS_ERR((void *)buf->iommu_addr)) { |
| 405 | dev_err(stat->isp->dev, | 406 | dev_err(stat->isp->dev, |
| 406 | "%s: Can't acquire memory for " | 407 | "%s: Can't acquire memory for " |
| @@ -409,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
| 409 | return -ENOMEM; | 410 | return -ENOMEM; |
| 410 | } | 411 | } |
| 411 | 412 | ||
| 412 | iovm = find_iovm_area(isp->iommu, buf->iommu_addr); | 413 | iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr); |
| 413 | if (!iovm || | 414 | if (!iovm || |
| 414 | !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, | 415 | !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, |
| 415 | DMA_FROM_DEVICE)) { | 416 | DMA_FROM_DEVICE)) { |
| @@ -418,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |||
| 418 | } | 419 | } |
| 419 | buf->iovm = iovm; | 420 | buf->iovm = iovm; |
| 420 | 421 | ||
| 421 | buf->virt_addr = da_to_va(stat->isp->iommu, | 422 | buf->virt_addr = omap_da_to_va(stat->isp->iommu, |
| 422 | (u32)buf->iommu_addr); | 423 | (u32)buf->iommu_addr); |
| 423 | buf->empty = 1; | 424 | buf->empty = 1; |
| 424 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." | 425 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." |
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c index fd965adfd597..912ac071b104 100644 --- a/drivers/media/video/omap3isp/ispvideo.c +++ b/drivers/media/video/omap3isp/ispvideo.c | |||
| @@ -446,7 +446,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen) | |||
| 446 | sgt->nents = sglen; | 446 | sgt->nents = sglen; |
| 447 | sgt->orig_nents = sglen; | 447 | sgt->orig_nents = sglen; |
| 448 | 448 | ||
| 449 | da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG); | 449 | da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG); |
| 450 | if (IS_ERR_VALUE(da)) | 450 | if (IS_ERR_VALUE(da)) |
| 451 | kfree(sgt); | 451 | kfree(sgt); |
| 452 | 452 | ||
| @@ -462,7 +462,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da) | |||
| 462 | { | 462 | { |
| 463 | struct sg_table *sgt; | 463 | struct sg_table *sgt; |
| 464 | 464 | ||
| 465 | sgt = iommu_vunmap(isp->iommu, (u32)da); | 465 | sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da); |
| 466 | kfree(sgt); | 466 | kfree(sgt); |
| 467 | } | 467 | } |
| 468 | 468 | ||
