aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarkus Elfring <elfring@users.sourceforge.net>2016-09-17 08:10:47 -0400
committerVinod Koul <vinod.koul@intel.com>2016-09-26 13:36:17 -0400
commitf4534adbcfbb38a21691ca8dfdc8750689d8bcc9 (patch)
tree6ef8dd7a60f37b78d969d8dafac6b2fcec4b1126
parent28c01058b28527be2a81e8ba2a53437910defbf3 (diff)
ste_dma40: Less checks in d40_hw_detect_init() after error detection
Four checks could be repeated by the d40_hw_detect_init() function during error handling, even when the variables involved held a null pointer.
* Adjust jump targets according to the Linux coding style convention.
* Call "iounmap" only once, at the end of the cleanup sequence.
* Delete the repeated null-pointer checks, which became unnecessary with this refactoring.
Signed-off-by: Markus Elfring <elfring@users.sourceforge.net> Reviewed-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--drivers/dma/ste_dma40.c67
1 file changed, 33 insertions(+), 34 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 4892c23b6c7b..e4c5c8b91e28 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3160,27 +3160,27 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3160 clk = clk_get(&pdev->dev, NULL); 3160 clk = clk_get(&pdev->dev, NULL);
3161 if (IS_ERR(clk)) { 3161 if (IS_ERR(clk)) {
3162 d40_err(&pdev->dev, "No matching clock found\n"); 3162 d40_err(&pdev->dev, "No matching clock found\n");
3163 goto failure; 3163 goto check_prepare_enabled;
3164 } 3164 }
3165 3165
3166 clk_ret = clk_prepare_enable(clk); 3166 clk_ret = clk_prepare_enable(clk);
3167 if (clk_ret) { 3167 if (clk_ret) {
3168 d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); 3168 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3169 goto failure; 3169 goto disable_unprepare;
3170 } 3170 }
3171 3171
3172 /* Get IO for DMAC base address */ 3172 /* Get IO for DMAC base address */
3173 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 3173 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3174 if (!res) 3174 if (!res)
3175 goto failure; 3175 goto disable_unprepare;
3176 3176
3177 if (request_mem_region(res->start, resource_size(res), 3177 if (request_mem_region(res->start, resource_size(res),
3178 D40_NAME " I/O base") == NULL) 3178 D40_NAME " I/O base") == NULL)
3179 goto failure; 3179 goto release_region;
3180 3180
3181 virtbase = ioremap(res->start, resource_size(res)); 3181 virtbase = ioremap(res->start, resource_size(res));
3182 if (!virtbase) 3182 if (!virtbase)
3183 goto failure; 3183 goto release_region;
3184 3184
3185 /* This is just a regular AMBA PrimeCell ID actually */ 3185 /* This is just a regular AMBA PrimeCell ID actually */
3186 for (pid = 0, i = 0; i < 4; i++) 3186 for (pid = 0, i = 0; i < 4; i++)
@@ -3192,13 +3192,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3192 3192
3193 if (cid != AMBA_CID) { 3193 if (cid != AMBA_CID) {
3194 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); 3194 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3195 goto failure; 3195 goto unmap_io;
3196 } 3196 }
3197 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { 3197 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3198 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 3198 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3199 AMBA_MANF_BITS(pid), 3199 AMBA_MANF_BITS(pid),
3200 AMBA_VENDOR_ST); 3200 AMBA_VENDOR_ST);
3201 goto failure; 3201 goto unmap_io;
3202 } 3202 }
3203 /* 3203 /*
3204 * HW revision: 3204 * HW revision:
@@ -3212,7 +3212,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3212 rev = AMBA_REV_BITS(pid); 3212 rev = AMBA_REV_BITS(pid);
3213 if (rev < 2) { 3213 if (rev < 2) {
3214 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); 3214 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3215 goto failure; 3215 goto unmap_io;
3216 } 3216 }
3217 3217
3218 /* The number of physical channels on this HW */ 3218 /* The number of physical channels on this HW */
@@ -3238,7 +3238,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3238 sizeof(struct d40_chan), GFP_KERNEL); 3238 sizeof(struct d40_chan), GFP_KERNEL);
3239 3239
3240 if (base == NULL) 3240 if (base == NULL)
3241 goto failure; 3241 goto unmap_io;
3242 3242
3243 base->rev = rev; 3243 base->rev = rev;
3244 base->clk = clk; 3244 base->clk = clk;
@@ -3287,63 +3287,62 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3287 sizeof(*base->phy_res), 3287 sizeof(*base->phy_res),
3288 GFP_KERNEL); 3288 GFP_KERNEL);
3289 if (!base->phy_res) 3289 if (!base->phy_res)
3290 goto failure; 3290 goto free_base;
3291 3291
3292 base->lookup_phy_chans = kcalloc(num_phy_chans, 3292 base->lookup_phy_chans = kcalloc(num_phy_chans,
3293 sizeof(*base->lookup_phy_chans), 3293 sizeof(*base->lookup_phy_chans),
3294 GFP_KERNEL); 3294 GFP_KERNEL);
3295 if (!base->lookup_phy_chans) 3295 if (!base->lookup_phy_chans)
3296 goto failure; 3296 goto free_phy_res;
3297 3297
3298 base->lookup_log_chans = kcalloc(num_log_chans, 3298 base->lookup_log_chans = kcalloc(num_log_chans,
3299 sizeof(*base->lookup_log_chans), 3299 sizeof(*base->lookup_log_chans),
3300 GFP_KERNEL); 3300 GFP_KERNEL);
3301 if (!base->lookup_log_chans) 3301 if (!base->lookup_log_chans)
3302 goto failure; 3302 goto free_phy_chans;
3303 3303
3304 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans, 3304 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3305 sizeof(d40_backup_regs_chan), 3305 sizeof(d40_backup_regs_chan),
3306 GFP_KERNEL); 3306 GFP_KERNEL);
3307 if (!base->reg_val_backup_chan) 3307 if (!base->reg_val_backup_chan)
3308 goto failure; 3308 goto free_log_chans;
3309 3309
3310 base->lcla_pool.alloc_map = kcalloc(num_phy_chans 3310 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3311 * D40_LCLA_LINK_PER_EVENT_GRP, 3311 * D40_LCLA_LINK_PER_EVENT_GRP,
3312 sizeof(*base->lcla_pool.alloc_map), 3312 sizeof(*base->lcla_pool.alloc_map),
3313 GFP_KERNEL); 3313 GFP_KERNEL);
3314 if (!base->lcla_pool.alloc_map) 3314 if (!base->lcla_pool.alloc_map)
3315 goto failure; 3315 goto free_backup_chan;
3316 3316
3317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 3317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3318 0, SLAB_HWCACHE_ALIGN, 3318 0, SLAB_HWCACHE_ALIGN,
3319 NULL); 3319 NULL);
3320 if (base->desc_slab == NULL) 3320 if (base->desc_slab == NULL)
3321 goto failure; 3321 goto free_map;
3322 3322
3323 return base; 3323 return base;
3324 3324 free_map:
3325failure: 3325 kfree(base->lcla_pool.alloc_map);
3326 free_backup_chan:
3327 kfree(base->reg_val_backup_chan);
3328 free_log_chans:
3329 kfree(base->lookup_log_chans);
3330 free_phy_chans:
3331 kfree(base->lookup_phy_chans);
3332 free_phy_res:
3333 kfree(base->phy_res);
3334 free_base:
3335 kfree(base);
3336 unmap_io:
3337 iounmap(virtbase);
3338 release_region:
3339 release_mem_region(res->start, resource_size(res));
3340 check_prepare_enabled:
3326 if (!clk_ret) 3341 if (!clk_ret)
3342 disable_unprepare:
3327 clk_disable_unprepare(clk); 3343 clk_disable_unprepare(clk);
3328 if (!IS_ERR(clk)) 3344 if (!IS_ERR(clk))
3329 clk_put(clk); 3345 clk_put(clk);
3330 if (virtbase)
3331 iounmap(virtbase);
3332 if (res)
3333 release_mem_region(res->start,
3334 resource_size(res));
3335 if (virtbase)
3336 iounmap(virtbase);
3337
3338 if (base) {
3339 kfree(base->lcla_pool.alloc_map);
3340 kfree(base->reg_val_backup_chan);
3341 kfree(base->lookup_log_chans);
3342 kfree(base->lookup_phy_chans);
3343 kfree(base->phy_res);
3344 kfree(base);
3345 }
3346
3347 return NULL; 3346 return NULL;
3348} 3347}
3349 3348