author    Srirangan <smadhavan@nvidia.com>  2018-08-14 05:51:38 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-16 18:11:33 -0400
commit    553fdf3534f856edce73744fd54914b9b7a829cc (patch)
tree      0ba5a5ae18cf2b8caeaa091c3d25b7aee2a717cf
parent    974d541623929fa2622d27d5d338a5b63596794b (diff)
gpu: nvgpu: common: mm: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I129cc170d27c7f1f2e193b326b95ebbe3c75ebab
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795600
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
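As an illustration of the pattern this change applies (a minimal sketch, not code taken from the patch; the function and names below are hypothetical), a MISRA 15.6-compliant block wraps even single-statement branches in braces:

#include <stdint.h>

/*
 * Hypothetical example of the Rule 15.6 pattern used throughout this
 * patch: every if/else body gets braces, even when it holds a single
 * statement, so a later added line cannot silently escape the branch.
 */
static uint64_t blocks_for_len(uint64_t len, uint64_t blk_size,
			       uint64_t blk_shift)
{
	uint64_t blks = len >> blk_shift;

	if (blks * blk_size != len) {	/* round up partial blocks */
		blks++;
	}

	return blks;
}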
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator.c   53
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c    38
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c    110
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c           40
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c            34
5 files changed, 178 insertions, 97 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index f75f9a1f..6cdb8f3b 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -69,21 +69,24 @@ static u64 nvgpu_bitmap_alloc_fixed(struct nvgpu_allocator *__a,
 
 	/* Compute the bit offset and make sure it's aligned to a block. */
 	offs = base >> a->blk_shift;
-	if (offs * a->blk_size != base)
+	if (offs * a->blk_size != base) {
 		return 0;
+	}
 
 	offs -= a->bit_offs;
 
 	blks = len >> a->blk_shift;
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 
 	/* Check if the space requested is already occupied. */
 	ret = bitmap_find_next_zero_area(a->bitmap, a->num_bits, offs, blks, 0);
-	if (ret != offs)
+	if (ret != offs) {
 		goto fail;
+	}
 
 	bitmap_set(a->bitmap, offs, blks);
 
@@ -115,14 +118,16 @@ static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *__a,
 	u64 blks, offs;
 
 	offs = base >> a->blk_shift;
-	if (WARN_ON(offs * a->blk_size != base))
+	if (WARN_ON(offs * a->blk_size != base)) {
 		return;
+	}
 
 	offs -= a->bit_offs;
 
 	blks = len >> a->blk_shift;
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 	bitmap_clear(a->bitmap, offs, blks);
@@ -155,8 +160,9 @@ static struct nvgpu_bitmap_alloc *find_alloc_metadata(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node)
+	if (!node) {
 		return NULL;
+	}
 
 	alloc = nvgpu_bitmap_alloc_from_rbtree_node(node);
 
@@ -174,8 +180,9 @@ static int __nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a,
 	struct nvgpu_bitmap_alloc *alloc =
 		nvgpu_kmem_cache_alloc(a->meta_data_cache);
 
-	if (!alloc)
+	if (!alloc) {
 		return -ENOMEM;
+	}
 
 	alloc->base = addr;
 	alloc->length = len;
@@ -197,8 +204,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 
 	blks = len >> a->blk_shift;
 
-	if (blks * a->blk_size != len)
+	if (blks * a->blk_size != len) {
 		blks++;
+	}
 
 	alloc_lock(__a);
 
@@ -216,8 +224,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 		limit = find_next_bit(a->bitmap, a->num_bits, a->next_blk);
 		offs = bitmap_find_next_zero_area(a->bitmap, limit,
						  0, blks, 0);
-		if (offs >= a->next_blk)
+		if (offs >= a->next_blk) {
 			goto fail;
+		}
 	}
 
 	bitmap_set(a->bitmap, offs, blks);
@@ -235,8 +244,9 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 	 * data it needs around to successfully free this allocation.
	 */
 	if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) &&
-	    __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size))
+	    __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size)) {
 		goto fail_reset_bitmap;
+	}
 
 	alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]",
 		  addr, len, blks, blks);
@@ -270,8 +280,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
 	}
 
 	alloc = find_alloc_metadata(a, addr);
-	if (!alloc)
+	if (!alloc) {
 		goto done;
+	}
 
 	/*
 	 * Address comes from adjusted offset (i.e the bit offset with
@@ -288,8 +299,9 @@ static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
 	a->bytes_freed += alloc->length;
 
 done:
-	if (a->meta_data_cache && alloc)
+	if (a->meta_data_cache && alloc) {
 		nvgpu_kmem_cache_free(a->meta_data_cache, alloc);
+	}
 	alloc_unlock(__a);
 }
 
@@ -366,16 +378,18 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	int err;
 	struct nvgpu_bitmap_allocator *a;
 
-	if (WARN_ON(blk_size & (blk_size - 1)))
+	if (WARN_ON(blk_size & (blk_size - 1))) {
 		return -EINVAL;
+	}
 
 	/*
 	 * blk_size must be a power-of-2; base length also need to be aligned
 	 * to blk_size.
	 */
 	if (blk_size & (blk_size - 1) ||
-	    base & (blk_size - 1) || length & (blk_size - 1))
+	    base & (blk_size - 1) || length & (blk_size - 1)) {
 		return -EINVAL;
+	}
 
 	if (base == 0) {
 		base = blk_size;
@@ -383,12 +397,14 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator));
-	if (!a)
+	if (!a) {
 		return -ENOMEM;
+	}
 
 	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &bitmap_ops);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 	if (!(flags & GPU_ALLOC_NO_ALLOC_PAGE)) {
 		a->meta_data_cache = nvgpu_kmem_cache_create(g,
@@ -431,8 +447,9 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
-	if (a->meta_data_cache)
+	if (a->meta_data_cache) {
 		nvgpu_kmem_cache_destroy(a->meta_data_cache);
+	}
 	nvgpu_kfree(g, a);
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index c749c729..a0b9013f 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -1,7 +1,7 @@
 /*
  * gk20a allocator
  *
- * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -29,40 +29,45 @@
 
 u64 nvgpu_alloc_length(struct nvgpu_allocator *a)
 {
-	if (a->ops->length)
+	if (a->ops->length) {
 		return a->ops->length(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_base(struct nvgpu_allocator *a)
 {
-	if (a->ops->base)
+	if (a->ops->base) {
 		return a->ops->base(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a)
 {
-	if (!a->ops || !a->ops->inited)
+	if (!a->ops || !a->ops->inited) {
 		return 0;
+	}
 
 	return a->ops->inited(a);
 }
 
 u64 nvgpu_alloc_end(struct nvgpu_allocator *a)
 {
-	if (a->ops->end)
+	if (a->ops->end) {
 		return a->ops->end(a);
+	}
 
 	return 0;
 }
 
 u64 nvgpu_alloc_space(struct nvgpu_allocator *a)
 {
-	if (a->ops->space)
+	if (a->ops->space) {
 		return a->ops->space(a);
+	}
 
 	return 0;
 }
@@ -80,8 +85,9 @@ void nvgpu_free(struct nvgpu_allocator *a, u64 addr)
 u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len,
		      u32 page_size)
 {
-	if (a->ops->alloc_fixed)
+	if (a->ops->alloc_fixed) {
 		return a->ops->alloc_fixed(a, base, len, page_size);
+	}
 
 	return 0;
 }
@@ -93,15 +99,17 @@ void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len)
 	 * nothing. The alternative would be to fall back on the regular
 	 * free but that may be harmful in unexpected ways.
	 */
-	if (a->ops->free_fixed)
+	if (a->ops->free_fixed) {
 		a->ops->free_fixed(a, base, len);
+	}
 }
 
 int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
				 struct nvgpu_alloc_carveout *co)
 {
-	if (a->ops->reserve_carveout)
+	if (a->ops->reserve_carveout) {
 		return a->ops->reserve_carveout(a, co);
+	}
 
 	return -ENODEV;
 }
@@ -109,8 +117,9 @@ int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
 void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a,
				  struct nvgpu_alloc_carveout *co)
 {
-	if (a->ops->release_carveout)
+	if (a->ops->release_carveout) {
 		a->ops->release_carveout(a, co);
+	}
 }
 
 void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
@@ -137,19 +146,22 @@ int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 {
 	int err;
 
-	if (!ops)
+	if (!ops) {
 		return -EINVAL;
+	}
 
 	/*
 	 * This is the bare minimum operations required for a sensible
 	 * allocator.
	 */
-	if (!ops->alloc || !ops->free || !ops->fini)
+	if (!ops->alloc || !ops->free || !ops->fini) {
 		return -EINVAL;
+	}
 
 	err = nvgpu_mutex_init(&a->lock);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	a->g = g;
 	a->ops = ops;
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 773d33ef..d001a2aa 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -264,8 +264,9 @@ static struct nvgpu_page_alloc *__find_page_alloc(
 	struct nvgpu_rbtree_node *node = NULL;
 
 	nvgpu_rbtree_search(addr, &node, a->allocs);
-	if (!node)
+	if (!node) {
 		return NULL;
+	}
 
 	alloc = nvgpu_page_alloc_from_rbtree_node(node);
 
@@ -355,8 +356,9 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 
 	if (!slab_page) {
 		slab_page = alloc_slab_page(a, slab);
-		if (!slab_page)
+		if (!slab_page) {
 			return -ENOMEM;
+		}
 	}
 
 	/*
@@ -376,12 +378,13 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
 	bitmap_set(&slab_page->bitmap, offs, 1);
 	slab_page->nr_objects_alloced++;
 
-	if (slab_page->nr_objects_alloced < slab_page->nr_objects)
+	if (slab_page->nr_objects_alloced < slab_page->nr_objects) {
 		add_slab_page_to_partial(slab, slab_page);
-	else if (slab_page->nr_objects_alloced == slab_page->nr_objects)
+	} else if (slab_page->nr_objects_alloced == slab_page->nr_objects) {
 		add_slab_page_to_full(slab, slab_page);
-	else
+	} else {
 		BUG(); /* Should be impossible to hit this. */
+	}
 
 	/*
 	 * Handle building the nvgpu_page_alloc struct. We expect one sgl
@@ -435,8 +438,9 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 
 	alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
 	err = __do_slab_alloc(a, slab, alloc);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 	palloc_dbg(a, "Alloc 0x%04llx sr=%d id=0x%010llx [slab]",
 		   len, slab_nr, alloc->base);
@@ -445,10 +449,12 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 	return alloc;
 
 fail:
-	if (alloc)
+	if (alloc) {
 		nvgpu_kmem_cache_free(a->alloc_cache, alloc);
-	if (sgl)
+	}
+	if (sgl) {
 		nvgpu_kfree(a->owner->g, sgl);
+	}
 	return NULL;
 }
 
@@ -465,27 +471,30 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
 
 	slab_page->nr_objects_alloced--;
 
-	if (slab_page->nr_objects_alloced == 0)
+	if (slab_page->nr_objects_alloced == 0) {
 		new_state = SP_EMPTY;
-	else
+	} else {
 		new_state = SP_PARTIAL;
+	}
 
 	/*
 	 * Need to migrate the page to a different list.
	 */
 	if (new_state != slab_page->state) {
 		/* Delete - can't be in empty. */
-		if (slab_page->state == SP_PARTIAL)
+		if (slab_page->state == SP_PARTIAL) {
 			del_slab_page_from_partial(slab, slab_page);
-		else
+		} else {
 			del_slab_page_from_full(slab, slab_page);
+		}
 
 		/* And add. */
 		if (new_state == SP_EMPTY) {
-			if (nvgpu_list_empty(&slab->empty))
+			if (nvgpu_list_empty(&slab->empty)) {
 				add_slab_page_to_empty(slab, slab_page);
-			else
+			} else {
 				free_slab_page(a, slab_page);
+			}
 		} else {
 			add_slab_page_to_partial(slab, slab_page);
 		}
@@ -515,8 +524,9 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 	int i = 0;
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
-	if (!alloc)
+	if (!alloc) {
 		goto fail;
+	}
 
 	memset(alloc, 0, sizeof(*alloc));
 
@@ -535,11 +545,13 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		 * requested size. The buddy allocator guarantees any given
 		 * single alloc is contiguous.
		 */
-		if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0)
+		if (a->flags & GPU_ALLOC_FORCE_CONTIG && i != 0) {
 			goto fail_cleanup;
+		}
 
-		if (chunk_len > max_chunk_len)
+		if (chunk_len > max_chunk_len) {
 			chunk_len = max_chunk_len;
+		}
 
 		/*
 		 * Keep attempting to allocate in smaller chunks until the alloc
@@ -582,10 +594,11 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 		 * Build the singly linked list with a head node that is part of
 		 * the list.
		 */
-		if (prev_sgl)
+		if (prev_sgl) {
 			prev_sgl->next = sgl;
-		else
+		} else {
 			alloc->sgt.sgl = (struct nvgpu_sgl *)sgl;
+		}
 
 		prev_sgl = sgl;
 
@@ -671,10 +684,11 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
 
 	alloc_lock(__a);
 	if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
-	    real_len <= (a->page_size / 2))
+	    real_len <= (a->page_size / 2)) {
 		alloc = __nvgpu_alloc_slab(a, real_len);
-	else
+	} else {
 		alloc = __nvgpu_alloc_pages(a, real_len);
+	}
 
 	if (!alloc) {
 		alloc_unlock(__a);
@@ -684,14 +698,16 @@ static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
 	__insert_page_alloc(a, alloc);
 
 	a->nr_allocs++;
-	if (real_len > a->page_size / 2)
+	if (real_len > a->page_size / 2) {
 		a->pages_alloced += alloc->length >> a->page_shift;
+	}
 	alloc_unlock(__a);
 
-	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER)
+	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		return alloc->base;
-	else
+	} else {
 		return (u64) (uintptr_t) alloc;
+	}
 }
 
 /*
@@ -705,11 +721,12 @@ static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base)
 
 	alloc_lock(__a);
 
-	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER)
+	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		alloc = __find_page_alloc(a, base);
-	else
+	} else {
 		alloc = __find_page_alloc(a,
			((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
+	}
 
 	if (!alloc) {
 		palloc_dbg(a, "Hrm, found no alloc?");
@@ -743,8 +760,9 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 
 	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	sgl = nvgpu_kzalloc(a->owner->g, sizeof(*sgl));
-	if (!alloc || !sgl)
+	if (!alloc || !sgl) {
 		goto fail;
+	}
 
 	alloc->sgt.ops = &page_alloc_sgl_ops;
 	alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length, 0);
@@ -765,10 +783,12 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 	return alloc;
 
 fail:
-	if (sgl)
+	if (sgl) {
 		nvgpu_kfree(a->owner->g, sgl);
-	if (alloc)
+	}
+	if (alloc) {
 		nvgpu_kmem_cache_free(a->alloc_cache, alloc);
+	}
 	return NULL;
 }
 
@@ -813,10 +833,11 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 	a->nr_fixed_allocs++;
 	a->pages_alloced += pages;
 
-	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER)
+	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		return alloc->base;
-	else
+	} else {
 		return (u64) (uintptr_t) alloc;
+	}
 }
 
 static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
@@ -829,8 +850,9 @@ static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
 
 	if (a->flags & GPU_ALLOC_NO_SCATTER_GATHER) {
 		alloc = __find_page_alloc(a, base);
-		if (!alloc)
+		if (!alloc) {
 			goto done;
+		}
 	} else {
 		alloc = (struct nvgpu_page_alloc *) (uintptr_t) base;
 	}
@@ -963,8 +985,9 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 	a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
				 nr_slabs,
				 sizeof(struct page_alloc_slab));
-	if (!a->slabs)
+	if (!a->slabs) {
 		return -ENOMEM;
+	}
 	a->nr_slabs = nr_slabs;
 
 	for (i = 0; i < nr_slabs; i++) {
@@ -990,16 +1013,19 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	char buddy_name[sizeof(__a->name)];
 	int err;
 
-	if (blk_size < SZ_4K)
+	if (blk_size < SZ_4K) {
 		return -EINVAL;
+	}
 
 	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator));
-	if (!a)
+	if (!a) {
 		return -ENOMEM;
+	}
 
 	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &page_ops);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 	a->alloc_cache = nvgpu_kmem_cache_create(g,
		sizeof(struct nvgpu_page_alloc));
@@ -1020,16 +1046,18 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 
 	if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) {
 		err = nvgpu_page_alloc_init_slabs(a);
-		if (err)
+		if (err) {
 			goto fail;
+		}
 	}
 
 	snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
 
 	err = nvgpu_buddy_allocator_init(g, &a->source_allocator, buddy_name,
					 base, length, blk_size, 0);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 #ifdef CONFIG_DEBUG_FS
 	nvgpu_init_alloc_debug(g, __a);
@@ -1044,10 +1072,12 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
-	if (a->alloc_cache)
+	if (a->alloc_cache) {
 		nvgpu_kmem_cache_destroy(a->alloc_cache);
-	if (a->slab_page_cache)
+	}
+	if (a->slab_page_cache) {
 		nvgpu_kmem_cache_destroy(a->slab_page_cache);
+	}
 	nvgpu_kfree(g, a);
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 84f45826..db48d168 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -95,8 +95,9 @@ int nvgpu_pd_cache_init(struct gk20a *g)
 	 * This gets called from finalize_poweron() so we need to make sure we
 	 * don't reinit the pd_cache over and over.
	 */
-	if (g->mm.pd_cache)
+	if (g->mm.pd_cache) {
 		return 0;
+	}
 
 	cache = nvgpu_kzalloc(g, sizeof(*cache));
 	if (!cache) {
@@ -123,8 +124,9 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
 	int i;
 	struct nvgpu_pd_cache *cache = g->mm.pd_cache;
 
-	if (!cache)
+	if (!cache) {
 		return;
+	}
 
 	for (i = 0; i < NVGPU_PD_CACHE_COUNT; i++) {
 		WARN_ON(!nvgpu_list_empty(&cache->full[i]));
@@ -164,8 +166,9 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 	 * going to be virtually contiguous and we don't have to force the
 	 * underlying allocations to be physically contiguous as well.
	 */
-	if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE)
+	if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE) {
 		flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+	}
 
 	err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
 	if (err) {
@@ -244,8 +247,9 @@ static int nvgpu_pd_cache_alloc_from_partial(struct gk20a *g,
 	mem_offs = bit_offs * pentry->pd_size;
 
 	/* Bit map full. Somethings wrong. */
-	if (WARN_ON(bit_offs >= ffz(pentry_mask)))
+	if (WARN_ON(bit_offs >= ffz(pentry_mask))) {
 		return -ENOMEM;
+	}
 
 	pentry->alloc_map |= 1 << bit_offs;
 
@@ -281,8 +285,9 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_get_partial(
 	struct nvgpu_list_node *list =
		&cache->partial[nvgpu_pd_cache_nr(bytes)];
 
-	if (nvgpu_list_empty(list))
+	if (nvgpu_list_empty(list)) {
 		return NULL;
+	}
 
 	return nvgpu_list_first_entry(list,
				      nvgpu_pd_mem_entry,
@@ -308,13 +313,15 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 	}
 
 	pentry = nvgpu_pd_cache_get_partial(cache, bytes);
-	if (!pentry)
+	if (!pentry) {
 		err = nvgpu_pd_cache_alloc_new(g, cache, pd, bytes);
-	else
+	} else {
 		err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd);
+	}
 
-	if (err)
+	if (err) {
 		nvgpu_err(g, "PD-Alloc [C] Failed!");
+	}
 
 	return err;
 }
@@ -335,14 +342,16 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
	 */
 	if (bytes >= PAGE_SIZE) {
 		err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes);
-		if (err)
+		if (err) {
 			return err;
+		}
 
 		return 0;
 	}
 
-	if (WARN_ON(!g->mm.pd_cache))
+	if (WARN_ON(!g->mm.pd_cache)) {
 		return -ENOMEM;
+	}
 
 	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
 	err = nvgpu_pd_cache_alloc(g, g->mm.pd_cache, pd, bytes);
@@ -355,8 +364,9 @@ void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 {
 	pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
 
-	if (!pd->mem)
+	if (!pd->mem) {
 		return;
+	}
 
 	nvgpu_dma_free(g, pd->mem);
 	nvgpu_kfree(g, pd->mem);
@@ -407,8 +417,9 @@ static struct nvgpu_pd_mem_entry *nvgpu_pd_cache_look_up(
 
 	nvgpu_rbtree_search((u64)(uintptr_t)pd->mem, &node,
			    cache->mem_tree);
-	if (!node)
+	if (!node) {
 		return NULL;
+	}
 
 	return nvgpu_pd_mem_entry_from_tree_entry(node);
 }
@@ -436,8 +447,9 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 	/*
 	 * Simple case: just DMA free.
	 */
-	if (!pd->cached)
+	if (!pd->cached) {
 		return __nvgpu_pd_cache_free_direct(g, pd);
+	}
 
 	nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
 	nvgpu_pd_cache_free(g, g->mm.pd_cache, pd);
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index 5a28b7bc..b8fecbfc 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,8 +34,9 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
 	nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list,
				  nvgpu_vm_area, vm_area_list) {
 		if (addr >= vm_area->addr &&
-		    addr < (u64)vm_area->addr + (u64)vm_area->size)
+		    addr < (u64)vm_area->addr + (u64)vm_area->size) {
 			return vm_area;
+		}
 	}
 
 	return NULL;
@@ -105,12 +106,14 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
		  page_size, pages, *addr, flags);
 
 	for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
-		if (vm->gmmu_page_sizes[pgsz_idx] == page_size)
+		if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
 			break;
+		}
 	}
 
-	if (pgsz_idx > gmmu_page_size_big)
+	if (pgsz_idx > gmmu_page_size_big) {
 		return -EINVAL;
+	}
 
 	/*
 	 * pgsz_idx isn't likely to get too crazy, since it starts at 0 and
@@ -119,26 +122,30 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
	 */
 	nvgpu_speculation_barrier();
 
-	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big)
+	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) {
 		return -EINVAL;
+	}
 
 	vm_area = nvgpu_kzalloc(g, sizeof(*vm_area));
-	if (!vm_area)
+	if (!vm_area) {
 		goto clean_up_err;
+	}
 
 	vma = vm->vma[pgsz_idx];
-	if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET)
+	if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
 		vaddr_start = nvgpu_alloc_fixed(vma, *addr,
						(u64)pages *
						(u64)page_size,
						page_size);
-	else
+	} else {
 		vaddr_start = nvgpu_alloc(vma,
					  (u64)pages *
					  (u64)page_size);
+	}
 
-	if (!vaddr_start)
+	if (!vaddr_start) {
 		goto clean_up_err;
+	}
 
 	vm_area->flags = flags;
 	vm_area->addr = vaddr_start;
@@ -179,10 +186,12 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
 	return 0;
 
 clean_up_err:
-	if (vaddr_start)
+	if (vaddr_start) {
 		nvgpu_free(vma, vaddr_start);
-	if (vm_area)
+	}
+	if (vm_area) {
 		nvgpu_kfree(g, vm_area);
+	}
 	return -ENOMEM;
 }
 
@@ -219,7 +228,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 	}
 
 	/* if this was a sparse mapping, free the va */
-	if (vm_area->sparse)
+	if (vm_area->sparse) {
 		g->ops.mm.gmmu_unmap(vm,
				     vm_area->addr,
				     vm_area->size,
@@ -228,6 +237,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
				     gk20a_mem_flag_none,
				     true,
				     NULL);
+	}
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 