author | Sachin Kamat <sachin.kamat@samsung.com> | 2014-08-04 00:36:28 -0400
---|---|---
committer | Joerg Roedel <jroedel@suse.de> | 2014-08-19 07:07:43 -0400
commit | f171abab8f1a75797124be5aae8376e20e4852d9 (patch) |
tree | d07f14a9cb8a18c733de1f6470fa50af96edc95d /drivers/iommu |
parent | 7d1311b93e58ed55f3a31cc8f94c4b8fe988a2b9 (diff) |
iommu/exynos: Fix trivial typos
Fixed trivial typos and grammar to improve readability.
Changed w/a to workaround.
Signed-off-by: Sachin Kamat <sachin.kamat@samsung.com>
Acked-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/exynos-iommu.c | 51 |
1 file changed, 26 insertions, 25 deletions
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index d037e87a1fe5..74233186f6f7 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -32,7 +32,7 @@
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;

-/* We does not consider super section mapping (16MB) */
+/* We do not consider super section mapping (16MB) */
 #define SECT_ORDER 20
 #define LPAGE_ORDER 16
 #define SPAGE_ORDER 12
@@ -307,7 +307,7 @@ static void show_fault_information(const char *name,

 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 {
-/* SYSMMU is in blocked when interrupt occurred. */
+/* SYSMMU is in blocked state when interrupt occurred. */
 struct sysmmu_drvdata *data = dev_id;
 enum exynos_sysmmu_inttype itype;
 sysmmu_iova_t addr = -1;
@@ -567,8 +567,8 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
 /*
  * L2TLB invalidation required
  * 4KB page: 1 invalidation
- * 64KB page: 16 invalidation
- * 1MB page: 64 invalidation
+ * 64KB page: 16 invalidations
+ * 1MB page: 64 invalidations
  * because it is set-associative TLB
  * with 8-way and 64 sets.
  * 1MB page can be cached in one of all sets.
@@ -714,7 +714,7 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
 if (!priv->lv2entcnt)
 goto err_counter;

-/* w/a of System MMU v3.3 to prevent caching 1MiB mapping */
+/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
 for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
 priv->pgtable[i + 0] = ZERO_LV2LINK;
 priv->pgtable[i + 1] = ZERO_LV2LINK;
@@ -861,14 +861,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
 pgtable_flush(sent, sent + 1);

 /*
- * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache
- * may caches the address of zero_l2_table. This function
- * replaces the zero_l2_table with new L2 page table to write
- * valid mappings.
+ * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
+ * FLPD cache may cache the address of zero_l2_table. This
+ * function replaces the zero_l2_table with new L2 page table
+ * to write valid mappings.
  * Accessing the valid area may cause page fault since FLPD
- * cache may still caches zero_l2_table for the valid area
- * instead of new L2 page table that have the mapping
- * information of the valid area
+ * cache may still cache zero_l2_table for the valid area
+ * instead of new L2 page table that has the mapping
+ * information of the valid area.
  * Thus any replacement of zero_l2_table with other valid L2
  * page table must involve FLPD cache invalidation for System
  * MMU v3.3.
@@ -963,27 +963,27 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 /*
  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
  *
- * System MMU v3.x have an advanced logic to improve address translation
+ * System MMU v3.x has advanced logic to improve address translation
  * performance with caching more page table entries by a page table walk.
- * However, the logic has a bug that caching fault page table entries and System
- * MMU reports page fault if the cached fault entry is hit even though the fault
- * entry is updated to a valid entry after the entry is cached.
- * To prevent caching fault page table entries which may be updated to valid
- * entries later, the virtual memory manager should care about the w/a about the
- * problem. The followings describe w/a.
+ * However, the logic has a bug that while caching faulty page table entries,
+ * System MMU reports page fault if the cached fault entry is hit even though
+ * the fault entry is updated to a valid entry after the entry is cached.
+ * To prevent caching faulty page table entries which may be updated to valid
+ * entries later, the virtual memory manager should care about the workaround
+ * for the problem. The following describes the workaround.
  *
  * Any two consecutive I/O virtual address regions must have a hole of 128KiB
- * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug)
+ * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
  *
- * Precisely, any start address of I/O virtual region must be aligned by
+ * Precisely, any start address of I/O virtual region must be aligned with
  * the following sizes for System MMU v3.1 and v3.2.
  * System MMU v3.1: 128KiB
  * System MMU v3.2: 256KiB
  *
  * Because System MMU v3.3 caches page table entries more aggressively, it needs
- * more w/a.
- * - Any two consecutive I/O virtual regions must be have a hole of larger size
- *   than or equal size to 128KiB.
+ * more workarounds.
+ * - Any two consecutive I/O virtual regions must have a hole of size larger
+ *   than or equal to 128KiB.
  * - Start address of an I/O virtual region must be aligned by 128KiB.
  */
 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
@@ -1061,7 +1061,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
 goto err;
 }

-*ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */
+/* workaround for h/w bug in System MMU v3.3 */
+*ent = ZERO_LV2LINK;
 pgtable_flush(ent, ent + 1);
 size = SECT_SIZE;
 goto done;
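
As a side note on the L2TLB comment in the hunk at -567: the counts follow from one invalidation per 4KiB small page, capped at the TLB's 64 sets. Below is a minimal illustrative sketch of that arithmetic, not code from the driver; the helper name and the NUM_TLB_SETS constant are made up for the example (SPAGE_ORDER 12 appears in the first hunk).

```c
/*
 * Illustration only: number of 4KiB-granule TLB invalidations implied
 * by the comment above. One invalidation per small page, capped at the
 * number of sets (64), since a 1MiB section can occupy at most one
 * entry per set of the 8-way/64-set TLB. Not taken from exynos-iommu.c.
 */
#define SPAGE_SIZE      (1UL << 12)     /* 4KiB, matches SPAGE_ORDER 12 */
#define NUM_TLB_SETS    64UL            /* hypothetical name for the 64 sets */

static unsigned long l2tlb_num_invalidations(unsigned long size)
{
        unsigned long num = size / SPAGE_SIZE;

        /* 4KiB -> 1, 64KiB -> 16, 1MiB -> 256 capped to 64 */
        return num > NUM_TLB_SETS ? NUM_TLB_SETS : num;
}
```

So a 64KiB large page needs 16 invalidations, while a 1MiB section would need 256 but is capped at 64 because it can occupy at most one entry in each of the 64 sets.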
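
Likewise, for the *CAUTION* block near -963: a minimal sketch of how an I/O virtual memory manager might honor the System MMU v3.3 constraints (start addresses aligned to 128KiB, and a hole of at least 128KiB between consecutive regions). The constants and helper names are hypothetical and not part of the driver or this patch.

```c
/*
 * Hypothetical helpers showing how an I/O virtual memory manager could
 * honor the System MMU v3.3 constraints described in the comment:
 * region starts aligned to 128KiB and a hole of at least 128KiB
 * between any two consecutive regions.
 */
#define SYSMMU_V33_ALIGN        (128UL * 1024)  /* start-address alignment */
#define SYSMMU_V33_MIN_HOLE     (128UL * 1024)  /* minimum inter-region gap */

/* Round a candidate IOVA up to the next 128KiB boundary. */
static unsigned long sysmmu_v33_align_start(unsigned long iova)
{
        return (iova + SYSMMU_V33_ALIGN - 1) & ~(SYSMMU_V33_ALIGN - 1);
}

/*
 * Earliest permissible start for a region that follows a previous
 * region ending at prev_end: leave the minimum hole, then align.
 */
static unsigned long sysmmu_v33_next_start(unsigned long prev_end)
{
        return sysmmu_v33_align_start(prev_end + SYSMMU_V33_MIN_HOLE);
}
```

For instance, sysmmu_v33_next_start(0x20000) yields 0x40000, which keeps both the 128KiB hole and the 128KiB alignment.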