path: root/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
author		Srirangan <smadhavan@nvidia.com>	2018-08-14 05:51:38 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-16 18:11:33 -0400
commit		553fdf3534f856edce73744fd54914b9b7a829cc (patch)
tree		0ba5a5ae18cf2b8caeaa091c3d25b7aee2a717cf /drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
parent		974d541623929fa2622d27d5d338a5b63596794b (diff)
gpu: nvgpu: common: mm: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I129cc170d27c7f1f2e193b326b95ebbe3c75ebab
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795600
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
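The pattern applied throughout the file is the compound-statement form that MISRA C:2012 Rule 15.6 mandates; a before/after fragment taken from this change illustrates it. The braces guarantee that a statement later added at the same indentation cannot silently fall outside the conditional:

	/* Before: single-statement if body, flagged under MISRA Rule 15.6 */
	blks = len >> a->blk_shift;
	if (blks * a->blk_size != len)
		blks++;

	/* After: body enclosed in braces, compliant with Rule 15.6 */
	blks = len >> a->blk_shift;
	if (blks * a->blk_size != len) {
		blks++;
	}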
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/bitmap_allocator.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/bitmap_allocator.c | 53
1 file changed, 35 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index f75f9a1f..6cdb8f3b 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -69,21 +69,24 @@ static u64 nvgpu_bitmap_alloc_fixed(struct nvgpu_allocator *__a,
 
 	/* Compute the bit offset and make sure it's aligned to a block. */
 	offs = base >> a->blk_shift;
-	if (offs * a->blk_size != base)
+	if (offs * a->blk_size != base) {
 		return 0;
+	}
 
 	offs -= a->bit_offs;
 
 	blks = len >> a->blk_shift;
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 
 	/* Check if the space requested is already occupied. */
 	ret = bitmap_find_next_zero_area(a->bitmap, a->num_bits, offs, blks, 0);
-	if (ret != offs)
+	if (ret != offs) {
 		goto fail;
+	}
 
 	bitmap_set(a->bitmap, offs, blks);
 
@@ -115,14 +118,16 @@ static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *__a,
 	u64 blks, offs;
 
 	offs = base >> a->blk_shift;
-	if (WARN_ON(offs * a->blk_size != base))
+	if (WARN_ON(offs * a->blk_size != base)) {
 		return;
+	}
 
 	offs -= a->bit_offs;
 
 	blks = len >> a->blk_shift;
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 	bitmap_clear(a->bitmap, offs, blks);
@@ -155,8 +160,9 @@ static struct nvgpu_bitmap_alloc *find_alloc_metadata(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node)
+	if (!node) {
 		return NULL;
+	}
 
 	alloc = nvgpu_bitmap_alloc_from_rbtree_node(node);
 
@@ -174,8 +180,9 @@ static int __nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a,
 	struct nvgpu_bitmap_alloc *alloc =
 		nvgpu_kmem_cache_alloc(a->meta_data_cache);
 
-	if (!alloc)
+	if (!alloc) {
 		return -ENOMEM;
+	}
 
 	alloc->base = addr;
 	alloc->length = len;
@@ -197,8 +204,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 
 	blks = len >> a->blk_shift;
 
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 
@@ -216,8 +224,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 		limit = find_next_bit(a->bitmap, a->num_bits, a->next_blk);
 		offs = bitmap_find_next_zero_area(a->bitmap, limit,
 						  0, blks, 0);
-		if (offs >= a->next_blk)
+		if (offs >= a->next_blk) {
 			goto fail;
+		}
 	}
 
 	bitmap_set(a->bitmap, offs, blks);
@@ -235,8 +244,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 	 * data it needs around to successfully free this allocation.
 	 */
 	if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) &&
-	    __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size))
+	    __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) {
 		goto fail_reset_bitmap;
+	}
 
 	alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]",
 		  addr, len, blks, blks);
@@ -270,8 +280,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
 	}
 
 	alloc = find_alloc_metadata(a, addr);
-	if (!alloc)
+	if (!alloc) {
 		goto done;
+	}
 
 	/*
 	 * Address comes from adjusted offset (i.e the bit offset with
@@ -288,8 +299,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
 	a->bytes_freed += alloc->length;
 
 done:
-	if (a->meta_data_cache && alloc)
+	if (a->meta_data_cache && alloc) {
 		nvgpu_kmem_cache_free(a->meta_data_cache, alloc);
+	}
 	alloc_unlock(__a);
 }
 
@@ -366,16 +378,18 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	int err;
 	struct nvgpu_bitmap_allocator *a;
 
-	if (WARN_ON(blk_size & (blk_size - 1)))
+	if (WARN_ON(blk_size & (blk_size - 1))) {
 		return -EINVAL;
+	}
 
 	/*
 	 * blk_size must be a power-of-2; base length also need to be aligned
 	 * to blk_size.
 	 */
 	if (blk_size & (blk_size - 1) ||
-	    base & (blk_size - 1) || length & (blk_size - 1))
+	    base & (blk_size - 1) || length & (blk_size - 1)) {
 		return -EINVAL;
+	}
 
 	if (base == 0) {
 		base = blk_size;
@@ -383,12 +397,14 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator));
-	if (!a)
+	if (!a) {
 		return -ENOMEM;
+	}
 
 	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &bitmap_ops);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 	if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) {
 		a->meta_data_cache = nvgpu_kmem_cache_create(g,
@@ -431,8 +447,9 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
-	if (a->meta_data_cache)
+	if (a->meta_data_cache) {
 		nvgpu_kmem_cache_destroy(a->meta_data_cache);
+	}
 	nvgpu_kfree(g, a);
 	return err;
 }