author     Mitchel Humpherys <mitchelh@codeaurora.org>    2014-07-08 12:52:18 -0400
committer  Will Deacon <will.deacon@arm.com>              2014-07-09 05:38:23 -0400
commit     2907320df3189420cb66178a86b2917f4b64018f
tree       953cae4299978b931d6b7face89fd81173c05644
parent     d3bca16635ae7443139c4408def7c1a50755083f
iommu/arm-smmu: fix some checkpatch issues
Fix some issues reported by checkpatch.pl. Mostly whitespace, but also
includes min=>min_t, kzalloc=>kcalloc, and kmalloc=>kmalloc_array.

The only issue I'm leaving alone is:

    arm-smmu.c:853: WARNING: line over 80 characters
    #853: FILE: arm-smmu.c:853:
    +			(MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |

since it seems to be a case where "exceeding 80 columns significantly
increases readability and does not hide information."
(Documentation/CodingStyle).

Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--  drivers/iommu/arm-smmu.c | 59
1 file changed, 37 insertions(+), 22 deletions(-)
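For readers unfamiliar with the helpers named in the commit message, here is a
minimal, hypothetical sketch (not code from arm-smmu.c; the function and
variable names are invented for illustration) of the three conversions —
kzalloc=>kcalloc, kmalloc=>kmalloc_array, and min=>min_t:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static int checkpatch_helpers_example(size_t nelem, unsigned long va_bits,
                                          unsigned long size)
    {
            u32 *zeroed, *raw;
            unsigned long capped;

            /* kcalloc(n, size, ...) replaces kzalloc(n * size, ...): a zeroed
             * array allocation with overflow checking on the n * size product. */
            zeroed = kcalloc(nelem, sizeof(*zeroed), GFP_KERNEL);

            /* kmalloc_array(n, size, ...) replaces kmalloc(sizeof(*p) * n, ...):
             * the same overflow check, but the memory is not zeroed. */
            raw = kmalloc_array(nelem, sizeof(*raw), GFP_KERNEL);

            if (!zeroed || !raw) {
                    kfree(zeroed);
                    kfree(raw);
                    return -ENOMEM;
            }

            /* min_t(type, a, b) replaces min((type)a, b): both operands are
             * converted to the named type before the comparison. */
            capped = min_t(unsigned long, va_bits, size);
            (void)capped;   /* example only */

            kfree(zeroed);
            kfree(raw);
            return 0;
    }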
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5496de58fc3b..f3f66416e252 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -317,9 +317,9 @@
 #define FSR_AFF                         (1 << 2)
 #define FSR_TF                          (1 << 1)
 
-#define FSR_IGN                         (FSR_AFF | FSR_ASF | FSR_TLBMCF | \
-                                         FSR_TLBLKF)
+#define FSR_IGN                         (FSR_AFF | FSR_ASF | \
+                                         FSR_TLBMCF | FSR_TLBLKF)
 #define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
                                          FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
 
 #define FSYNR0_WNR                      (1 << 4)
@@ -405,7 +405,7 @@ struct arm_smmu_option_prop {
        const char *prop;
 };
 
-static struct arm_smmu_option_prop arm_smmu_options [] = {
+static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
        { 0, NULL},
 };
@@ -413,6 +413,7 @@ static struct arm_smmu_option_prop arm_smmu_options [] = {
 static void parse_driver_options(struct arm_smmu_device *smmu)
 {
        int i = 0;
+
        do {
                if (of_property_read_bool(smmu->dev->of_node,
                                          arm_smmu_options[i].prop)) {
@@ -427,6 +428,7 @@ static struct device *dev_get_master_dev(struct device *dev)
 {
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
+
                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
                return bus->bridge->parent;
@@ -442,6 +444,7 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 
        while (node) {
                struct arm_smmu_master *master;
+
                master = container_of(node, struct arm_smmu_master, node);
 
                if (dev_node < master->of_node)
@@ -475,8 +478,8 @@ static int insert_smmu_master(struct arm_smmu_device *smmu,
        new = &smmu->masters.rb_node;
        parent = NULL;
        while (*new) {
-               struct arm_smmu_master *this;
-               this = container_of(*new, struct arm_smmu_master, node);
+               struct arm_smmu_master *this
+                       = container_of(*new, struct arm_smmu_master, node);
 
                parent = *new;
                if (master->of_node < this->of_node)
@@ -716,7 +719,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        /* CBAR */
        reg = cfg->cbar;
        if (smmu->version == 1)
-             reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+               reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
        /*
         * Use the weakest shareability/memory types, so they are
@@ -954,7 +957,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
        if (!smmu_domain)
                return -ENOMEM;
 
-       pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
        if (!pgd)
                goto out_free_domain;
        smmu_domain->cfg.pgd = pgd;
@@ -971,6 +974,7 @@ out_free_domain:
 static void arm_smmu_free_ptes(pmd_t *pmd)
 {
        pgtable_t table = pmd_pgtable(*pmd);
+
        pgtable_page_dtor(table);
        __free_page(table);
 }
@@ -1057,7 +1061,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
        if (cfg->smrs)
                return -EEXIST;
 
-       smrs = kmalloc(sizeof(*smrs) * cfg->num_streamids, GFP_KERNEL);
+       smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
        if (!smrs) {
                dev_err(smmu->dev, "failed to allocate %d SMRs\n",
                        cfg->num_streamids);
@@ -1107,6 +1111,7 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
        /* Invalidate the SMRs before freeing back to the allocator */
        for (i = 0; i < cfg->num_streamids; ++i) {
                u8 idx = smrs[i].idx;
+
                writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
                __arm_smmu_free_bitmap(smmu->smr_map, idx);
        }
@@ -1123,6 +1128,7 @@ static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
 
        for (i = 0; i < cfg->num_streamids; ++i) {
                u16 sid = cfg->streamids[i];
+
                writel_relaxed(S2CR_TYPE_BYPASS,
                               gr0_base + ARM_SMMU_GR0_S2CR(sid));
        }
@@ -1141,6 +1147,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;
+
                idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
                s2cr = S2CR_TYPE_TRANS |
                       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
@@ -1235,6 +1242,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
        if (pmd_none(*pmd)) {
                /* Allocate a new set of tables */
                pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
+
                if (!table)
                        return -ENOMEM;
 
@@ -1300,6 +1308,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
         */
        do {
                int i = 1;
+
                pteval &= ~ARM_SMMU_PTE_CONT;
 
                if (arm_smmu_pte_is_contiguous_range(addr, end)) {
@@ -1314,7 +1323,8 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
                        idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
                        cont_start = pmd_page_vaddr(*pmd) + idx;
                        for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
-                               pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;
+                               pte_val(*(cont_start + j)) &=
+                                       ~ARM_SMMU_PTE_CONT;
 
                        arm_smmu_flush_pgtable(smmu, cont_start,
                                               sizeof(*pte) *
@@ -1617,7 +1627,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        /* Mark all SMRn as invalid and all S2CRn as bypass */
        for (i = 0; i < smmu->num_mapping_groups; ++i) {
                writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
-               writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                       gr0_base + ARM_SMMU_GR0_S2CR(i));
        }
 
        /* Make sure all context banks are disabled and clear CB_FSR */
@@ -1757,11 +1768,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
 
        /* Check for size mismatch of SMMU address space from mapped region */
-       size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+       size = 1 <<
+               (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
        size *= (smmu->pagesize << 1);
        if (smmu->size != size)
-               dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
-                       "from mapped region size (0x%lx)!\n", size, smmu->size);
+               dev_warn(smmu->dev,
+                       "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
+                       size, smmu->size);
 
        smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
                                     ID1_NUMS2CB_MASK;
@@ -1782,14 +1795,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * allocation (PTRS_PER_PGD).
         */
 #ifdef CONFIG_64BIT
-       smmu->s1_output_size = min((unsigned long)VA_BITS, size);
+       smmu->s1_output_size = min_t(unsigned long, VA_BITS, size);
 #else
        smmu->s1_output_size = min(32UL, size);
 #endif
 
        /* The stage-2 output mask is also applied for bypass */
        size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
-       smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);
+       smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
 
        if (smmu->version == 1) {
                smmu->input_size = 32;
@@ -1813,7 +1826,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        dev_notice(smmu->dev,
                   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
-                  smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
+                  smmu->input_size, smmu->s1_output_size,
+                  smmu->s2_output_size);
        return 0;
 }
 
@@ -1867,6 +1881,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 
        for (i = 0; i < num_irqs; ++i) {
                int irq = platform_get_irq(pdev, i);
+
                if (irq < 0) {
                        dev_err(dev, "failed to get irq index %d\n", i);
                        return -ENODEV;
@@ -1932,8 +1947,8 @@ out_free_irqs:
 
 out_put_masters:
        for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-               struct arm_smmu_master *master;
-               master = container_of(node, struct arm_smmu_master, node);
+               struct arm_smmu_master *master
+                       = container_of(node, struct arm_smmu_master, node);
                of_node_put(master->of_node);
        }
 
@@ -1961,8 +1976,8 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                return -ENODEV;
 
        for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-               struct arm_smmu_master *master;
-               master = container_of(node, struct arm_smmu_master, node);
+               struct arm_smmu_master *master
+                       = container_of(node, struct arm_smmu_master, node);
                of_node_put(master->of_node);
        }
 
@@ -1973,7 +1988,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                free_irq(smmu->irqs[i], smmu);
 
        /* Turn the thing off */
-       writel(sCR0_CLIENTPD,ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
        return 0;
 }
 