diff options
author | Cho KyongHo <pullip.cho@samsung.com> | 2014-05-12 02:14:58 -0400 |
---|---|---|
committer | Joerg Roedel <jroedel@suse.de> | 2014-05-13 13:12:57 -0400 |
commit | d09d78fc986be8355928256d1b86b713588999d7 (patch) | |
tree | 3d44e91b5584a3f53bf0b31e949d3bf24991de75 /drivers/iommu/exynos-iommu.c | |
parent | 9d4e7a24d77a05fb5c4e4121051a8d80501c74d3 (diff) |
iommu/exynos: Use exynos-iommu specific typedef
This commit introduces sysmmu_pte_t for page table entries and
sysmmu_iova_t for I/O virtual addresses that are manipulated by the
exynos-iommu driver. The purpose of the typedefs is to remove the
driver code's dependency on the CPU architecture's word size, in
preparation for a change from 32 bit to 64 bit.
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
Signed-off-by: Shaik Ameer Basha <shaik.ameer@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/exynos-iommu.c')
-rw-r--r-- | drivers/iommu/exynos-iommu.c | 101 |
1 file changed, 59 insertions, 42 deletions
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index d89ad5f8747c..3291619c9db0 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -29,6 +29,9 @@ | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | 31 | ||
32 | typedef u32 sysmmu_iova_t; | ||
33 | typedef u32 sysmmu_pte_t; | ||
34 | |||
32 | /* We does not consider super section mapping (16MB) */ | 35 | /* We does not consider super section mapping (16MB) */ |
33 | #define SECT_ORDER 20 | 36 | #define SECT_ORDER 20 |
34 | #define LPAGE_ORDER 16 | 37 | #define LPAGE_ORDER 16 |
@@ -50,20 +53,32 @@ | |||
50 | #define lv2ent_small(pent) ((*(pent) & 2) == 2) | 53 | #define lv2ent_small(pent) ((*(pent) & 2) == 2) |
51 | #define lv2ent_large(pent) ((*(pent) & 3) == 1) | 54 | #define lv2ent_large(pent) ((*(pent) & 3) == 1) |
52 | 55 | ||
56 | static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) | ||
57 | { | ||
58 | return iova & (size - 1); | ||
59 | } | ||
60 | |||
53 | #define section_phys(sent) (*(sent) & SECT_MASK) | 61 | #define section_phys(sent) (*(sent) & SECT_MASK) |
54 | #define section_offs(iova) ((iova) & 0xFFFFF) | 62 | #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) |
55 | #define lpage_phys(pent) (*(pent) & LPAGE_MASK) | 63 | #define lpage_phys(pent) (*(pent) & LPAGE_MASK) |
56 | #define lpage_offs(iova) ((iova) & 0xFFFF) | 64 | #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) |
57 | #define spage_phys(pent) (*(pent) & SPAGE_MASK) | 65 | #define spage_phys(pent) (*(pent) & SPAGE_MASK) |
58 | #define spage_offs(iova) ((iova) & 0xFFF) | 66 | #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) |
59 | |||
60 | #define lv1ent_offset(iova) ((iova) >> SECT_ORDER) | ||
61 | #define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER) | ||
62 | 67 | ||
63 | #define NUM_LV1ENTRIES 4096 | 68 | #define NUM_LV1ENTRIES 4096 |
64 | #define NUM_LV2ENTRIES 256 | 69 | #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) |
65 | 70 | ||
66 | #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long)) | 71 | static u32 lv1ent_offset(sysmmu_iova_t iova) |
72 | { | ||
73 | return iova >> SECT_ORDER; | ||
74 | } | ||
75 | |||
76 | static u32 lv2ent_offset(sysmmu_iova_t iova) | ||
77 | { | ||
78 | return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); | ||
79 | } | ||
80 | |||
81 | #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) | ||
67 | 82 | ||
68 | #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) | 83 | #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) |
69 | 84 | ||
@@ -101,14 +116,14 @@ | |||
101 | 116 | ||
102 | static struct kmem_cache *lv2table_kmem_cache; | 117 | static struct kmem_cache *lv2table_kmem_cache; |
103 | 118 | ||
104 | static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova) | 119 | static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) |
105 | { | 120 | { |
106 | return pgtable + lv1ent_offset(iova); | 121 | return pgtable + lv1ent_offset(iova); |
107 | } | 122 | } |
108 | 123 | ||
109 | static unsigned long *page_entry(unsigned long *sent, unsigned long iova) | 124 | static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) |
110 | { | 125 | { |
111 | return (unsigned long *)phys_to_virt( | 126 | return (sysmmu_pte_t *)phys_to_virt( |
112 | lv2table_base(sent)) + lv2ent_offset(iova); | 127 | lv2table_base(sent)) + lv2ent_offset(iova); |
113 | } | 128 | } |
114 | 129 | ||
@@ -150,7 +165,7 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = { | |||
150 | 165 | ||
151 | struct exynos_iommu_domain { | 166 | struct exynos_iommu_domain { |
152 | struct list_head clients; /* list of sysmmu_drvdata.node */ | 167 | struct list_head clients; /* list of sysmmu_drvdata.node */ |
153 | unsigned long *pgtable; /* lv1 page table, 16KB */ | 168 | sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ |
154 | short *lv2entcnt; /* free lv2 entry counter for each section */ | 169 | short *lv2entcnt; /* free lv2 entry counter for each section */ |
155 | spinlock_t lock; /* lock for this structure */ | 170 | spinlock_t lock; /* lock for this structure */ |
156 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ | 171 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ |
@@ -215,7 +230,7 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase) | |||
215 | } | 230 | } |
216 | 231 | ||
217 | static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, | 232 | static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, |
218 | unsigned long iova, unsigned int num_inv) | 233 | sysmmu_iova_t iova, unsigned int num_inv) |
219 | { | 234 | { |
220 | unsigned int i; | 235 | unsigned int i; |
221 | for (i = 0; i < num_inv; i++) { | 236 | for (i = 0; i < num_inv; i++) { |
@@ -226,7 +241,7 @@ static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, | |||
226 | } | 241 | } |
227 | 242 | ||
228 | static void __sysmmu_set_ptbase(void __iomem *sfrbase, | 243 | static void __sysmmu_set_ptbase(void __iomem *sfrbase, |
229 | unsigned long pgd) | 244 | phys_addr_t pgd) |
230 | { | 245 | { |
231 | __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */ | 246 | __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */ |
232 | __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR); | 247 | __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR); |
@@ -236,22 +251,22 @@ static void __sysmmu_set_ptbase(void __iomem *sfrbase, | |||
236 | 251 | ||
237 | static void show_fault_information(const char *name, | 252 | static void show_fault_information(const char *name, |
238 | enum exynos_sysmmu_inttype itype, | 253 | enum exynos_sysmmu_inttype itype, |
239 | phys_addr_t pgtable_base, unsigned long fault_addr) | 254 | phys_addr_t pgtable_base, sysmmu_iova_t fault_addr) |
240 | { | 255 | { |
241 | unsigned long *ent; | 256 | sysmmu_pte_t *ent; |
242 | 257 | ||
243 | if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT)) | 258 | if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT)) |
244 | itype = SYSMMU_FAULT_UNKNOWN; | 259 | itype = SYSMMU_FAULT_UNKNOWN; |
245 | 260 | ||
246 | pr_err("%s occurred at %#lx by %s(Page table base: %pa)\n", | 261 | pr_err("%s occurred at %#x by %s(Page table base: %pa)\n", |
247 | sysmmu_fault_name[itype], fault_addr, name, &pgtable_base); | 262 | sysmmu_fault_name[itype], fault_addr, name, &pgtable_base); |
248 | 263 | ||
249 | ent = section_entry(phys_to_virt(pgtable_base), fault_addr); | 264 | ent = section_entry(phys_to_virt(pgtable_base), fault_addr); |
250 | pr_err("\tLv1 entry: 0x%lx\n", *ent); | 265 | pr_err("\tLv1 entry: %#x\n", *ent); |
251 | 266 | ||
252 | if (lv1ent_page(ent)) { | 267 | if (lv1ent_page(ent)) { |
253 | ent = page_entry(ent, fault_addr); | 268 | ent = page_entry(ent, fault_addr); |
254 | pr_err("\t Lv2 entry: 0x%lx\n", *ent); | 269 | pr_err("\t Lv2 entry: %#x\n", *ent); |
255 | } | 270 | } |
256 | } | 271 | } |
257 | 272 | ||
@@ -260,7 +275,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) | |||
260 | /* SYSMMU is in blocked when interrupt occurred. */ | 275 | /* SYSMMU is in blocked when interrupt occurred. */ |
261 | struct sysmmu_drvdata *data = dev_id; | 276 | struct sysmmu_drvdata *data = dev_id; |
262 | enum exynos_sysmmu_inttype itype; | 277 | enum exynos_sysmmu_inttype itype; |
263 | unsigned long addr = -1; | 278 | sysmmu_iova_t addr = -1; |
264 | int ret = -ENOSYS; | 279 | int ret = -ENOSYS; |
265 | 280 | ||
266 | WARN_ON(!is_sysmmu_active(data)); | 281 | WARN_ON(!is_sysmmu_active(data)); |
@@ -284,7 +299,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) | |||
284 | __func__); | 299 | __func__); |
285 | BUG(); | 300 | BUG(); |
286 | } else { | 301 | } else { |
287 | unsigned long base = | 302 | unsigned int base = |
288 | __raw_readl(data->sfrbase + REG_PT_BASE_ADDR); | 303 | __raw_readl(data->sfrbase + REG_PT_BASE_ADDR); |
289 | show_fault_information(dev_name(data->sysmmu), | 304 | show_fault_information(dev_name(data->sysmmu), |
290 | itype, base, addr); | 305 | itype, base, addr); |
@@ -349,7 +364,7 @@ finish: | |||
349 | * enabled before. | 364 | * enabled before. |
350 | */ | 365 | */ |
351 | static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data, | 366 | static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data, |
352 | unsigned long pgtable, struct iommu_domain *domain) | 367 | phys_addr_t pgtable, struct iommu_domain *domain) |
353 | { | 368 | { |
354 | int ret = 0; | 369 | int ret = 0; |
355 | unsigned long flags; | 370 | unsigned long flags; |
@@ -390,7 +405,7 @@ finish: | |||
390 | return ret; | 405 | return ret; |
391 | } | 406 | } |
392 | 407 | ||
393 | int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable) | 408 | int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable) |
394 | { | 409 | { |
395 | struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); | 410 | struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu); |
396 | int ret; | 411 | int ret; |
@@ -426,7 +441,7 @@ static bool exynos_sysmmu_disable(struct device *dev) | |||
426 | return disabled; | 441 | return disabled; |
427 | } | 442 | } |
428 | 443 | ||
429 | static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova, | 444 | static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova, |
430 | size_t size) | 445 | size_t size) |
431 | { | 446 | { |
432 | unsigned long flags; | 447 | unsigned long flags; |
@@ -577,7 +592,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) | |||
577 | if (!priv) | 592 | if (!priv) |
578 | return -ENOMEM; | 593 | return -ENOMEM; |
579 | 594 | ||
580 | priv->pgtable = (unsigned long *)__get_free_pages( | 595 | priv->pgtable = (sysmmu_pte_t *)__get_free_pages( |
581 | GFP_KERNEL | __GFP_ZERO, 2); | 596 | GFP_KERNEL | __GFP_ZERO, 2); |
582 | if (!priv->pgtable) | 597 | if (!priv->pgtable) |
583 | goto err_pgtable; | 598 | goto err_pgtable; |
@@ -716,19 +731,19 @@ finish: | |||
716 | pm_runtime_put(data->sysmmu); | 731 | pm_runtime_put(data->sysmmu); |
717 | } | 732 | } |
718 | 733 | ||
719 | static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, | 734 | static sysmmu_pte_t *alloc_lv2entry(sysmmu_pte_t *sent, sysmmu_iova_t iova, |
720 | short *pgcounter) | 735 | short *pgcounter) |
721 | { | 736 | { |
722 | if (lv1ent_section(sent)) { | 737 | if (lv1ent_section(sent)) { |
723 | WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova); | 738 | WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); |
724 | return ERR_PTR(-EADDRINUSE); | 739 | return ERR_PTR(-EADDRINUSE); |
725 | } | 740 | } |
726 | 741 | ||
727 | if (lv1ent_fault(sent)) { | 742 | if (lv1ent_fault(sent)) { |
728 | unsigned long *pent; | 743 | sysmmu_pte_t *pent; |
729 | 744 | ||
730 | pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); | 745 | pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); |
731 | BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1)); | 746 | BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1)); |
732 | if (!pent) | 747 | if (!pent) |
733 | return ERR_PTR(-ENOMEM); | 748 | return ERR_PTR(-ENOMEM); |
734 | 749 | ||
@@ -741,18 +756,18 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, | |||
741 | return page_entry(sent, iova); | 756 | return page_entry(sent, iova); |
742 | } | 757 | } |
743 | 758 | ||
744 | static int lv1set_section(unsigned long *sent, unsigned long iova, | 759 | static int lv1set_section(sysmmu_pte_t *sent, sysmmu_iova_t iova, |
745 | phys_addr_t paddr, short *pgcnt) | 760 | phys_addr_t paddr, short *pgcnt) |
746 | { | 761 | { |
747 | if (lv1ent_section(sent)) { | 762 | if (lv1ent_section(sent)) { |
748 | WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped", | 763 | WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", |
749 | iova); | 764 | iova); |
750 | return -EADDRINUSE; | 765 | return -EADDRINUSE; |
751 | } | 766 | } |
752 | 767 | ||
753 | if (lv1ent_page(sent)) { | 768 | if (lv1ent_page(sent)) { |
754 | if (*pgcnt != NUM_LV2ENTRIES) { | 769 | if (*pgcnt != NUM_LV2ENTRIES) { |
755 | WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped", | 770 | WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", |
756 | iova); | 771 | iova); |
757 | return -EADDRINUSE; | 772 | return -EADDRINUSE; |
758 | } | 773 | } |
@@ -768,7 +783,7 @@ static int lv1set_section(unsigned long *sent, unsigned long iova, | |||
768 | return 0; | 783 | return 0; |
769 | } | 784 | } |
770 | 785 | ||
771 | static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, | 786 | static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, |
772 | short *pgcnt) | 787 | short *pgcnt) |
773 | { | 788 | { |
774 | if (size == SPAGE_SIZE) { | 789 | if (size == SPAGE_SIZE) { |
@@ -800,11 +815,12 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, | |||
800 | return 0; | 815 | return 0; |
801 | } | 816 | } |
802 | 817 | ||
803 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, | 818 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, |
804 | phys_addr_t paddr, size_t size, int prot) | 819 | phys_addr_t paddr, size_t size, int prot) |
805 | { | 820 | { |
806 | struct exynos_iommu_domain *priv = domain->priv; | 821 | struct exynos_iommu_domain *priv = domain->priv; |
807 | unsigned long *entry; | 822 | sysmmu_pte_t *entry; |
823 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; | ||
808 | unsigned long flags; | 824 | unsigned long flags; |
809 | int ret = -ENOMEM; | 825 | int ret = -ENOMEM; |
810 | 826 | ||
@@ -818,7 +834,7 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
818 | ret = lv1set_section(entry, iova, paddr, | 834 | ret = lv1set_section(entry, iova, paddr, |
819 | &priv->lv2entcnt[lv1ent_offset(iova)]); | 835 | &priv->lv2entcnt[lv1ent_offset(iova)]); |
820 | } else { | 836 | } else { |
821 | unsigned long *pent; | 837 | sysmmu_pte_t *pent; |
822 | 838 | ||
823 | pent = alloc_lv2entry(entry, iova, | 839 | pent = alloc_lv2entry(entry, iova, |
824 | &priv->lv2entcnt[lv1ent_offset(iova)]); | 840 | &priv->lv2entcnt[lv1ent_offset(iova)]); |
@@ -831,7 +847,7 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
831 | } | 847 | } |
832 | 848 | ||
833 | if (ret) | 849 | if (ret) |
834 | pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n", | 850 | pr_debug("%s: Failed to map iova %#x/%#zx bytes\n", |
835 | __func__, iova, size); | 851 | __func__, iova, size); |
836 | 852 | ||
837 | spin_unlock_irqrestore(&priv->pgtablelock, flags); | 853 | spin_unlock_irqrestore(&priv->pgtablelock, flags); |
@@ -840,13 +856,14 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
840 | } | 856 | } |
841 | 857 | ||
842 | static size_t exynos_iommu_unmap(struct iommu_domain *domain, | 858 | static size_t exynos_iommu_unmap(struct iommu_domain *domain, |
843 | unsigned long iova, size_t size) | 859 | unsigned long l_iova, size_t size) |
844 | { | 860 | { |
845 | struct exynos_iommu_domain *priv = domain->priv; | 861 | struct exynos_iommu_domain *priv = domain->priv; |
846 | struct sysmmu_drvdata *data; | 862 | struct sysmmu_drvdata *data; |
847 | unsigned long flags; | 863 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; |
848 | unsigned long *ent; | 864 | sysmmu_pte_t *ent; |
849 | size_t err_pgsize; | 865 | size_t err_pgsize; |
866 | unsigned long flags; | ||
850 | 867 | ||
851 | BUG_ON(priv->pgtable == NULL); | 868 | BUG_ON(priv->pgtable == NULL); |
852 | 869 | ||
@@ -913,7 +930,7 @@ err: | |||
913 | spin_unlock_irqrestore(&priv->pgtablelock, flags); | 930 | spin_unlock_irqrestore(&priv->pgtablelock, flags); |
914 | 931 | ||
915 | WARN(1, | 932 | WARN(1, |
916 | "%s: Failed due to size(%#x) @ %#08lx is smaller than page size %#x\n", | 933 | "%s: Failed due to size(%#zx) @ %#x is smaller than page size %#zx\n", |
917 | __func__, size, iova, err_pgsize); | 934 | __func__, size, iova, err_pgsize); |
918 | 935 | ||
919 | return 0; | 936 | return 0; |
@@ -923,7 +940,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, | |||
923 | dma_addr_t iova) | 940 | dma_addr_t iova) |
924 | { | 941 | { |
925 | struct exynos_iommu_domain *priv = domain->priv; | 942 | struct exynos_iommu_domain *priv = domain->priv; |
926 | unsigned long *entry; | 943 | sysmmu_pte_t *entry; |
927 | unsigned long flags; | 944 | unsigned long flags; |
928 | phys_addr_t phys = 0; | 945 | phys_addr_t phys = 0; |
929 | 946 | ||