diff options
author | Sai Nikhil <snikhil@nvidia.com> | 2018-08-17 01:20:17 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-29 11:59:31 -0400 |
commit | 2f97e683feed3c3ba3c8722c4f6ab7466bcef0c0 (patch) | |
tree | c0f90c3dc6909122cfde071efff8ff24d2b61471 /drivers/gpu/nvgpu/common/mm/buddy_allocator.c | |
parent | 19cd7ffb5def933db323fe682ec4a263eb1923f9 (diff) |
gpu: nvgpu: common: fix MISRA Rule 10.4
MISRA Rule 10.4 only allows the usage of arithmetic operations on
operands of the same essential type category.
Add "U" at the end of integer literals so that both operands have the
same essential type when an arithmetic operation is performed.
This fixes violations where an arithmetic operation is performed on
signed and unsigned int types.
In balloc_get_order_list() the argument "int order" has been changed to
a u64 because all callers of this function pass a u64 argument.
JIRA NVGPU-992
Change-Id: Ie2964f9f1dfb2865a9bd6e6cdd65e7cda6c1f638
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1784419
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/buddy_allocator.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/buddy_allocator.c | 52 |
1 files changed, 26 insertions, 26 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c index e684e637..a9f90069 100644 --- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c | |||
@@ -74,7 +74,7 @@ static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a) | |||
74 | { | 74 | { |
75 | u64 true_max_order = ilog2(a->blks); | 75 | u64 true_max_order = ilog2(a->blks); |
76 | 76 | ||
77 | if (a->max_order == 0) { | 77 | if (a->max_order == 0U) { |
78 | a->max_order = true_max_order; | 78 | a->max_order = true_max_order; |
79 | return; | 79 | return; |
80 | } | 80 | } |
@@ -95,7 +95,7 @@ static void balloc_allocator_align(struct nvgpu_buddy_allocator *a) | |||
95 | { | 95 | { |
96 | a->start = ALIGN(a->base, a->blk_size); | 96 | a->start = ALIGN(a->base, a->blk_size); |
97 | WARN_ON(a->start != a->base); | 97 | WARN_ON(a->start != a->base); |
98 | a->end = (a->base + a->length) & ~(a->blk_size - 1); | 98 | a->end = (a->base + a->length) & ~(a->blk_size - 1U); |
99 | a->count = a->end - a->start; | 99 | a->count = a->end - a->start; |
100 | a->blks = a->count >> a->blk_shift; | 100 | a->blks = a->count >> a->blk_shift; |
101 | } | 101 | } |
@@ -119,7 +119,7 @@ static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a, | |||
119 | new_buddy->parent = parent; | 119 | new_buddy->parent = parent; |
120 | new_buddy->start = start; | 120 | new_buddy->start = start; |
121 | new_buddy->order = order; | 121 | new_buddy->order = order; |
122 | new_buddy->end = start + (1 << order) * a->blk_size; | 122 | new_buddy->end = start + (U64(1) << order) * a->blk_size; |
123 | new_buddy->pte_size = BALLOC_PTE_SIZE_ANY; | 123 | new_buddy->pte_size = BALLOC_PTE_SIZE_ANY; |
124 | 124 | ||
125 | return new_buddy; | 125 | return new_buddy; |
@@ -185,7 +185,7 @@ static void balloc_blist_rem(struct nvgpu_buddy_allocator *a, | |||
185 | 185 | ||
186 | static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len) | 186 | static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len) |
187 | { | 187 | { |
188 | if (len == 0) { | 188 | if (len == 0U) { |
189 | return 0; | 189 | return 0; |
190 | } | 190 | } |
191 | 191 | ||
@@ -200,7 +200,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a, | |||
200 | { | 200 | { |
201 | u64 size = (end - start) >> a->blk_shift; | 201 | u64 size = (end - start) >> a->blk_shift; |
202 | 202 | ||
203 | if (size > 0) { | 203 | if (size > 0U) { |
204 | return min_t(u64, ilog2(size), a->max_order); | 204 | return min_t(u64, ilog2(size), a->max_order); |
205 | } else { | 205 | } else { |
206 | return GPU_BALLOC_MAX_ORDER; | 206 | return GPU_BALLOC_MAX_ORDER; |
@@ -212,7 +212,7 @@ static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a, | |||
212 | */ | 212 | */ |
213 | static int balloc_init_lists(struct nvgpu_buddy_allocator *a) | 213 | static int balloc_init_lists(struct nvgpu_buddy_allocator *a) |
214 | { | 214 | { |
215 | int i; | 215 | u32 i; |
216 | u64 bstart, bend, order; | 216 | u64 bstart, bend, order; |
217 | struct nvgpu_buddy *buddy; | 217 | struct nvgpu_buddy *buddy; |
218 | 218 | ||
@@ -220,7 +220,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a) | |||
220 | bend = a->end; | 220 | bend = a->end; |
221 | 221 | ||
222 | /* First make sure the LLs are valid. */ | 222 | /* First make sure the LLs are valid. */ |
223 | for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { | 223 | for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { |
224 | nvgpu_init_list_node(balloc_get_order_list(a, i)); | 224 | nvgpu_init_list_node(balloc_get_order_list(a, i)); |
225 | } | 225 | } |
226 | 226 | ||
@@ -239,7 +239,7 @@ static int balloc_init_lists(struct nvgpu_buddy_allocator *a) | |||
239 | return 0; | 239 | return 0; |
240 | 240 | ||
241 | cleanup: | 241 | cleanup: |
242 | for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { | 242 | for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { |
243 | if (!nvgpu_list_empty(balloc_get_order_list(a, i))) { | 243 | if (!nvgpu_list_empty(balloc_get_order_list(a, i))) { |
244 | buddy = nvgpu_list_first_entry( | 244 | buddy = nvgpu_list_first_entry( |
245 | balloc_get_order_list(a, i), | 245 | balloc_get_order_list(a, i), |
@@ -257,7 +257,7 @@ cleanup: | |||
257 | */ | 257 | */ |
258 | static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na) | 258 | static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na) |
259 | { | 259 | { |
260 | int i; | 260 | u32 i; |
261 | struct nvgpu_rbtree_node *node = NULL; | 261 | struct nvgpu_rbtree_node *node = NULL; |
262 | struct nvgpu_buddy *bud; | 262 | struct nvgpu_buddy *bud; |
263 | struct nvgpu_fixed_alloc *falloc; | 263 | struct nvgpu_fixed_alloc *falloc; |
@@ -299,8 +299,8 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na) | |||
299 | /* | 299 | /* |
300 | * Now clean up the unallocated buddies. | 300 | * Now clean up the unallocated buddies. |
301 | */ | 301 | */ |
302 | for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { | 302 | for (i = 0U; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { |
303 | BUG_ON(a->buddy_list_alloced[i] != 0); | 303 | BUG_ON(a->buddy_list_alloced[i] != 0U); |
304 | 304 | ||
305 | while (!nvgpu_list_empty(balloc_get_order_list(a, i))) { | 305 | while (!nvgpu_list_empty(balloc_get_order_list(a, i))) { |
306 | bud = nvgpu_list_first_entry( | 306 | bud = nvgpu_list_first_entry( |
@@ -310,19 +310,19 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *na) | |||
310 | nvgpu_kmem_cache_free(a->buddy_cache, bud); | 310 | nvgpu_kmem_cache_free(a->buddy_cache, bud); |
311 | } | 311 | } |
312 | 312 | ||
313 | if (a->buddy_list_len[i] != 0) { | 313 | if (a->buddy_list_len[i] != 0U) { |
314 | nvgpu_info(na->g, | 314 | nvgpu_info(na->g, |
315 | "Excess buddies!!! (%d: %llu)", | 315 | "Excess buddies!!! (%d: %llu)", |
316 | i, a->buddy_list_len[i]); | 316 | i, a->buddy_list_len[i]); |
317 | BUG(); | 317 | BUG(); |
318 | } | 318 | } |
319 | if (a->buddy_list_split[i] != 0) { | 319 | if (a->buddy_list_split[i] != 0U) { |
320 | nvgpu_info(na->g, | 320 | nvgpu_info(na->g, |
321 | "Excess split nodes!!! (%d: %llu)", | 321 | "Excess split nodes!!! (%d: %llu)", |
322 | i, a->buddy_list_split[i]); | 322 | i, a->buddy_list_split[i]); |
323 | BUG(); | 323 | BUG(); |
324 | } | 324 | } |
325 | if (a->buddy_list_alloced[i] != 0) { | 325 | if (a->buddy_list_alloced[i] != 0U) { |
326 | nvgpu_info(na->g, | 326 | nvgpu_info(na->g, |
327 | "Excess alloced nodes!!! (%d: %llu)", | 327 | "Excess alloced nodes!!! (%d: %llu)", |
328 | i, a->buddy_list_alloced[i]); | 328 | i, a->buddy_list_alloced[i]); |
@@ -392,14 +392,14 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a, | |||
392 | struct nvgpu_buddy *left, *right; | 392 | struct nvgpu_buddy *left, *right; |
393 | u64 half; | 393 | u64 half; |
394 | 394 | ||
395 | left = balloc_new_buddy(a, b, b->start, b->order - 1); | 395 | left = balloc_new_buddy(a, b, b->start, b->order - 1U); |
396 | if (!left) { | 396 | if (!left) { |
397 | return -ENOMEM; | 397 | return -ENOMEM; |
398 | } | 398 | } |
399 | 399 | ||
400 | half = (b->end - b->start) / 2; | 400 | half = (b->end - b->start) / 2U; |
401 | 401 | ||
402 | right = balloc_new_buddy(a, b, b->start + half, b->order - 1); | 402 | right = balloc_new_buddy(a, b, b->start + half, b->order - 1U); |
403 | if (!right) { | 403 | if (!right) { |
404 | nvgpu_kmem_cache_free(a->buddy_cache, left); | 404 | nvgpu_kmem_cache_free(a->buddy_cache, left); |
405 | return -ENOMEM; | 405 | return -ENOMEM; |
@@ -624,7 +624,7 @@ static void __balloc_get_parent_range(struct nvgpu_buddy_allocator *a, | |||
624 | u64 shifted_base = balloc_base_shift(a, base); | 624 | u64 shifted_base = balloc_base_shift(a, base); |
625 | 625 | ||
626 | order++; | 626 | order++; |
627 | base_mask = ~((a->blk_size << order) - 1); | 627 | base_mask = ~((a->blk_size << order) - 1U); |
628 | 628 | ||
629 | shifted_base &= base_mask; | 629 | shifted_base &= base_mask; |
630 | 630 | ||
@@ -720,7 +720,7 @@ static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a, | |||
720 | u64 align_order; | 720 | u64 align_order; |
721 | 721 | ||
722 | shifted_base = balloc_base_shift(a, base); | 722 | shifted_base = balloc_base_shift(a, base); |
723 | if (shifted_base == 0) { | 723 | if (shifted_base == 0U) { |
724 | align_order = __fls(len >> a->blk_shift); | 724 | align_order = __fls(len >> a->blk_shift); |
725 | } else { | 725 | } else { |
726 | align_order = min_t(u64, | 726 | align_order = min_t(u64, |
@@ -871,11 +871,11 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na, | |||
871 | struct nvgpu_buddy_allocator *a = na->priv; | 871 | struct nvgpu_buddy_allocator *a = na->priv; |
872 | 872 | ||
873 | /* If base isn't aligned to an order 0 block, fail. */ | 873 | /* If base isn't aligned to an order 0 block, fail. */ |
874 | if (base & (a->blk_size - 1)) { | 874 | if (base & (a->blk_size - 1U)) { |
875 | goto fail; | 875 | goto fail; |
876 | } | 876 | } |
877 | 877 | ||
878 | if (len == 0) { | 878 | if (len == 0U) { |
879 | goto fail; | 879 | goto fail; |
880 | } | 880 | } |
881 | 881 | ||
@@ -1255,10 +1255,10 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, | |||
1255 | struct nvgpu_buddy_allocator *a; | 1255 | struct nvgpu_buddy_allocator *a; |
1256 | 1256 | ||
1257 | /* blk_size must be greater than 0 and a power of 2. */ | 1257 | /* blk_size must be greater than 0 and a power of 2. */ |
1258 | if (blk_size == 0) { | 1258 | if (blk_size == 0U) { |
1259 | return -EINVAL; | 1259 | return -EINVAL; |
1260 | } | 1260 | } |
1261 | if (blk_size & (blk_size - 1)) { | 1261 | if (blk_size & (blk_size - 1U)) { |
1262 | return -EINVAL; | 1262 | return -EINVAL; |
1263 | } | 1263 | } |
1264 | 1264 | ||
@@ -1291,7 +1291,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, | |||
1291 | * If base is 0 then modfy base to be the size of one block so that we | 1291 | * If base is 0 then modfy base to be the size of one block so that we |
1292 | * can return errors by returning addr == 0. | 1292 | * can return errors by returning addr == 0. |
1293 | */ | 1293 | */ |
1294 | if (a->base == 0) { | 1294 | if (a->base == 0U) { |
1295 | a->base = a->blk_size; | 1295 | a->base = a->blk_size; |
1296 | a->length -= a->blk_size; | 1296 | a->length -= a->blk_size; |
1297 | } | 1297 | } |
@@ -1308,8 +1308,8 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na, | |||
1308 | * requirement is not necessary. | 1308 | * requirement is not necessary. |
1309 | */ | 1309 | */ |
1310 | if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages && | 1310 | if (flags & GPU_ALLOC_GVA_SPACE && vm->big_pages && |
1311 | (base & ((vm->big_page_size << 10) - 1) || | 1311 | (base & ((vm->big_page_size << 10) - 1U) || |
1312 | size & ((vm->big_page_size << 10) - 1))) { | 1312 | size & ((vm->big_page_size << 10) - 1U))) { |
1313 | return -EINVAL; | 1313 | return -EINVAL; |
1314 | } | 1314 | } |
1315 | 1315 | ||