aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2016-05-27 17:23:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-27 18:26:11 -0400
commit287980e49ffc0f6d911601e7e352a812ed27768e (patch)
treea906f835eb5be85dca4fd0c9c6f21b8f60920424 /drivers/iommu
parent7ded384a12688c2a86b618da16bc87713404dfcc (diff)
remove lots of IS_ERR_VALUE abuses
Most users of IS_ERR_VALUE() in the kernel are wrong, as they pass an 'int' into a function that takes an 'unsigned long' argument. This happens to work because the type is sign-extended on 64-bit architectures before it gets converted into an unsigned type. However, anything that passes an 'unsigned short' or 'unsigned int' argument into IS_ERR_VALUE() is guaranteed to be broken, as are 8-bit integers and types that are wider than 'unsigned long'. Andrzej Hajda has already fixed a lot of the worst abusers that were causing actual bugs, but it would be nice to prevent any users that are not passing 'unsigned long' arguments. This patch changes all users of IS_ERR_VALUE() that I could find on 32-bit ARM randconfig builds and x86 allmodconfig. For the moment, this doesn't change the definition of IS_ERR_VALUE() because there are probably still architecture specific users elsewhere. Almost all the warnings I got are for files that are better off using 'if (err)' or 'if (err < 0)'. The only legitimate user I could find that we get a warning for is the (32-bit only) freescale fman driver, so I did not remove the IS_ERR_VALUE() there but changed the type to 'unsigned long'. For 9pfs, I just worked around one user whose calling conventions are so obscure that I did not dare change the behavior. I was using this definition for testing: #define IS_ERR_VALUE(x) ((unsigned long*)NULL == (typeof (x)*)NULL && \ unlikely((unsigned long long)(x) >= (unsigned long long)(typeof(x))-MAX_ERRNO)) which ends up making all 16-bit or wider types work correctly with the most plausible interpretation of what IS_ERR_VALUE() was supposed to return according to its users, but also causes a compile-time warning for any users that do not pass an 'unsigned long' argument. I suggested this approach earlier this year, but back then we ended up deciding to just fix the users that are obviously broken. 
After the initial warning that caused me to get involved in the discussion (fs/gfs2/dir.c) showed up again in the mainline kernel, Linus asked me to send the whole thing again. [ Updated the 9p parts as per Al Viro - Linus ] Signed-off-by: Arnd Bergmann <arnd@arndb.de> Cc: Andrzej Hajda <a.hajda@samsung.com> Cc: Andrew Morton <akpm@linux-foundation.org> Link: https://lkml.org/lkml/2016/1/7/363 Link: https://lkml.org/lkml/2016/5/27/486 Acked-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> # For nvmem part Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/arm-smmu-v3.c18
-rw-r--r--drivers/iommu/arm-smmu.c8
2 files changed, 13 insertions, 13 deletions
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ebab33e77d67..94b68213c50d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
 	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
-	if (IS_ERR_VALUE(asid))
+	if (asid < 0)
 		return asid;
 
 	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
-	if (IS_ERR_VALUE(vmid))
+	if (vmid < 0)
 		return vmid;
 
 	cfg->vmid = (u16)vmid;
@@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 	smmu_domain->pgtbl_ops = pgtbl_ops;
 
 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		free_io_pgtable_ops(pgtbl_ops);
 
 	return ret;
@@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev)
 	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
 
 	smmu_group->ste.bypass = true;
-	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+	if (arm_smmu_install_ste_for_group(smmu_group) < 0)
 		dev_warn(dev, "failed to install bypass STE\n");
 
 	smmu_group->domain = NULL;
@@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
 
 	ret = arm_smmu_install_ste_for_group(smmu_group);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		smmu_group->domain = NULL;
 
 out_unlock:
@@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 						arm_smmu_evtq_handler,
 						arm_smmu_evtq_thread,
 						0, "arm-smmu-v3-evtq", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
 	}
 
@@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 		ret = devm_request_irq(smmu->dev, irq,
 				       arm_smmu_cmdq_sync_handler, 0,
 				       "arm-smmu-v3-cmdq-sync", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
 	}
 
@@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 	if (irq) {
 		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
 				       0, "arm-smmu-v3-gerror", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable gerror irq\n");
 	}
 
@@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 							arm_smmu_priq_thread,
 							0, "arm-smmu-v3-priq",
 							smmu);
-			if (IS_ERR_VALUE(ret))
+			if (ret < 0)
 				dev_warn(smmu->dev,
 					 "failed to enable priq irq\n");
 			else
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e206ce7a4e4b..9345a3fcb706 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 
 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 				      smmu->num_context_banks);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto out_unlock;
 
 	cfg->cbndx = ret;
@@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
 			  "arm-smmu-context-fault", domain);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
 			cfg->irptndx, irq);
 		cfg->irptndx = INVALID_IRPTNDX;
@@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
 						  smmu->num_mapping_groups);
-		if (IS_ERR_VALUE(idx)) {
+		if (idx < 0) {
 			dev_err(smmu->dev, "failed to allocate free SMR\n");
 			goto err_free_smrs;
 		}
@@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	/*