author     Srirangan <smadhavan@nvidia.com>  2018-08-14 01:27:15 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-17 16:54:08 -0400
commit     70c20bb75be7815ebc67ac82d6999f46bc25ed6d (patch)
tree       19d6b5299e09b71e9afe2967a758f036bb9b79bc /drivers/gpu/nvgpu/common/mm/vm.c
parent     553fdf3534f856edce73744fd54914b9b7a829cc (diff)
gpu: nvgpu: common: mm: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces (a
before/after sketch of the pattern follows the sign-offs below).
JIRA NVGPU-671
Change-Id: Ieeecf719dca9acc1a116d2893637bf770caf4f5b
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1794241
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
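For reference, a minimal sketch of the pattern this change applies throughout
vm.c. The helper below is illustrative only (demo_pick_page_size() is not an
nvgpu function), but it compiles as plain C and shows the bracing that MISRA
C:2012 Rule 15.6 mandates:

#include <stddef.h>
#include <errno.h>

/* Illustrative helper, not part of nvgpu: picks a page size in KB. */
static int demo_pick_page_size(const int *big_pages_ok)
{
        int pgsz_kb;

        /* Was the unbraced form: if (big_pages_ok == NULL) return -EINVAL; */
        if (big_pages_ok == NULL) {
                return -EINVAL;
        }

        /* if-else bodies must be braced as well, even single statements. */
        if (*big_pages_ok != 0) {
                pgsz_kb = 64;
        } else {
                pgsz_kb = 4;
        }

        return pgsz_kb;
}

The unbraced forms compile identically, but a statement later added beneath an
unbraced if silently lands outside the conditional; the braces make the extent
of each branch explicit, which is what the rule enforces.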
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 123
1 file changed, 81 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3b682e28..7d97b7b7 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -111,8 +111,9 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 
         __nvgpu_pd_cache_free_direct(g, pdb);
 
-        if (!pdb->entries)
+        if (!pdb->entries) {
                 return;
+        }
 
         for (i = 0; i < pdb->num_entries; i++) {
                 __nvgpu_vm_free_entries(vm, &pdb->entries[i], 1);
@@ -204,8 +205,9 @@ int nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 {
         u64 mask = ((u64)vm->big_page_size << 10) - 1;
 
-        if (base & mask || size & mask)
+        if (base & mask || size & mask) {
                 return 0;
+        }
         return 1;
 }
 
@@ -223,19 +225,23 @@ static int nvgpu_init_sema_pool(struct vm_gk20a *vm)
         /*
          * Don't waste the memory on semaphores if we don't need them.
          */
-        if (nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS))
+        if (nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS)) {
                 return 0;
+        }
 
-        if (vm->sema_pool)
+        if (vm->sema_pool) {
                 return 0;
+        }
 
         sema_sea = nvgpu_semaphore_sea_create(g);
-        if (!sema_sea)
+        if (!sema_sea) {
                 return -ENOMEM;
+        }
 
         err = nvgpu_semaphore_pool_alloc(sema_sea, &vm->sema_pool);
-        if (err)
+        if (err) {
                 return err;
+        }
 
         /*
          * Allocate a chunk of GPU VA space for mapping the semaphores. We will
@@ -287,11 +293,13 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
         u64 kernel_vma_start, kernel_vma_limit;
         struct gk20a *g = gk20a_from_mm(mm);
 
-        if (WARN_ON(kernel_reserved + low_hole > aperture_size))
+        if (WARN_ON(kernel_reserved + low_hole > aperture_size)) {
                 return -ENOMEM;
+        }
 
-        if (WARN_ON(vm->guest_managed && kernel_reserved != 0))
+        if (WARN_ON(vm->guest_managed && kernel_reserved != 0)) {
                 return -EINVAL;
+        }
 
         nvgpu_log_info(g, "Init space for %s: valimit=0x%llx, "
                        "LP size=0x%x lowhole=0x%llx",
@@ -308,8 +316,9 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
         vm->vma[gmmu_page_size_small] = &vm->user;
         vm->vma[gmmu_page_size_big] = &vm->user;
         vm->vma[gmmu_page_size_kernel] = &vm->kernel;
-        if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES))
+        if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
                 vm->vma[gmmu_page_size_big] = &vm->user_lp;
+        }
 
         vm->va_start = low_hole;
         vm->va_limit = aperture_size;
@@ -332,8 +341,9 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
         /* Initialize the page table data structures. */
         strncpy(vm->name, name, min(strlen(name), sizeof(vm->name)));
         err = nvgpu_gmmu_init_page_table(vm);
-        if (err)
+        if (err) {
                 goto clean_up_vgpu_vm;
+        }
 
         /* Setup vma limits. */
         if (kernel_reserved + low_hole < aperture_size) {
@@ -396,14 +406,15 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
          * Determine if big pages are possible in this VM. If a split address
          * space is used then check the user_lp vma instead of the user vma.
          */
-        if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES))
+        if (nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
                 vm->big_pages = big_pages &&
                         nvgpu_big_pages_possible(vm, user_vma_start,
                                 user_vma_limit - user_vma_start);
-        else
+        } else {
                 vm->big_pages = big_pages &&
                         nvgpu_big_pages_possible(vm, user_lp_vma_start,
                                 user_lp_vma_limit - user_lp_vma_start);
+        }
 
         /*
          * User VMA.
@@ -418,8 +429,9 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
                                 SZ_4K,
                                 GPU_BALLOC_MAX_ORDER,
                                 GPU_ALLOC_GVA_SPACE);
-                if (err)
+                if (err) {
                         goto clean_up_page_tables;
+                }
         } else {
                 /*
                  * Make these allocator pointers point to the kernel allocator
@@ -443,8 +455,9 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
                                 vm->big_page_size,
                                 GPU_BALLOC_MAX_ORDER,
                                 GPU_ALLOC_GVA_SPACE);
-                if (err)
+                if (err) {
                         goto clean_up_allocators;
+                }
         }
 
         /*
@@ -458,8 +471,9 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
                         SZ_4K,
                         GPU_BALLOC_MAX_ORDER,
                         kernel_vma_flags);
-        if (err)
+        if (err) {
                 goto clean_up_allocators;
+        }
 
         vm->mapped_buffers = NULL;
 
@@ -475,19 +489,23 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
          */
         if (vm->va_limit > 4ULL * SZ_1G) {
                 err = nvgpu_init_sema_pool(vm);
-                if (err)
+                if (err) {
                         goto clean_up_allocators;
+                }
         }
 
         return 0;
 
 clean_up_allocators:
-        if (nvgpu_alloc_initialized(&vm->kernel))
+        if (nvgpu_alloc_initialized(&vm->kernel)) {
                 nvgpu_alloc_destroy(&vm->kernel);
-        if (nvgpu_alloc_initialized(&vm->user))
+        }
+        if (nvgpu_alloc_initialized(&vm->user)) {
                 nvgpu_alloc_destroy(&vm->user);
-        if (nvgpu_alloc_initialized(&vm->user_lp))
+        }
+        if (nvgpu_alloc_initialized(&vm->user_lp)) {
                 nvgpu_alloc_destroy(&vm->user_lp);
+        }
 clean_up_page_tables:
         /* Cleans up nvgpu_gmmu_init_page_table() */
         __nvgpu_pd_cache_free_direct(g, &vm->pdb);
@@ -547,8 +565,9 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
 {
         struct vm_gk20a *vm = nvgpu_kzalloc(g, sizeof(*vm));
 
-        if (!vm)
+        if (!vm) {
                 return NULL;
+        }
 
         if (__nvgpu_vm_init(&g->mm, vm, big_page_size, low_hole,
                             kernel_reserved, aperture_size, big_pages,
@@ -582,9 +601,10 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm)
                 }
         }
 
-        if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va)
+        if (nvgpu_mem_is_valid(&g->syncpt_mem) && vm->syncpt_ro_map_gpu_va) {
                 nvgpu_gmmu_unmap(vm, &g->syncpt_mem,
                                  vm->syncpt_ro_map_gpu_va);
+        }
 
         nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
@@ -603,12 +623,15 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm)
                 nvgpu_kfree(vm->mm->g, vm_area);
         }
 
-        if (nvgpu_alloc_initialized(&vm->kernel))
+        if (nvgpu_alloc_initialized(&vm->kernel)) {
                 nvgpu_alloc_destroy(&vm->kernel);
-        if (nvgpu_alloc_initialized(&vm->user))
+        }
+        if (nvgpu_alloc_initialized(&vm->user)) {
                 nvgpu_alloc_destroy(&vm->user);
-        if (nvgpu_alloc_initialized(&vm->user_lp))
+        }
+        if (nvgpu_alloc_initialized(&vm->user_lp)) {
                 nvgpu_alloc_destroy(&vm->user_lp);
+        }
 
         nvgpu_vm_free_entries(vm, &vm->pdb);
 
@@ -664,8 +687,9 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
         struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
         nvgpu_rbtree_search(addr, &node, root);
-        if (!node)
+        if (!node) {
                 return NULL;
+        }
 
         return mapped_buffer_from_rbtree_node(node);
 }
@@ -677,8 +701,9 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
         struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
         nvgpu_rbtree_range_search(addr, &node, root);
-        if (!node)
+        if (!node) {
                 return NULL;
+        }
 
         return mapped_buffer_from_rbtree_node(node);
 }
@@ -690,8 +715,9 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
         struct nvgpu_rbtree_node *root = vm->mapped_buffers;
 
         nvgpu_rbtree_less_than_search(addr, &node, root);
-        if (!node)
+        if (!node) {
                 return NULL;
+        }
 
         return mapped_buffer_from_rbtree_node(node);
 }
@@ -746,8 +772,9 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
         int i;
         struct vm_gk20a_mapping_batch batch;
 
-        if (num_buffers == 0)
+        if (num_buffers == 0) {
                 return;
+        }
 
         nvgpu_mutex_acquire(&vm->update_gmmu_lock);
         nvgpu_vm_mapping_batch_start(&batch);
@@ -814,10 +841,11 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                            compr_kind : NVGPU_KIND_INVALID);
         binfo.incompr_kind = incompr_kind;
 
-        if (compr_kind != NVGPU_KIND_INVALID)
+        if (compr_kind != NVGPU_KIND_INVALID) {
                 map_key_kind = compr_kind;
-        else
+        } else {
                 map_key_kind = incompr_kind;
+        }
 
         /*
          * Check if this buffer is already mapped.
@@ -847,11 +875,12 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
         }
 
         align = nvgpu_sgt_alignment(g, sgt);
-        if (g->mm.disable_bigpage)
+        if (g->mm.disable_bigpage) {
                 binfo.pgsz_idx = gmmu_page_size_small;
-        else
+        } else {
                 binfo.pgsz_idx = __get_pte_size(vm, map_addr,
                                                 min_t(u64, binfo.size, align));
+        }
         map_size = map_size ? map_size : binfo.size;
         map_size = ALIGN(map_size, SZ_4K);
 
@@ -872,8 +901,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                                          map_size,
                                          binfo.pgsz_idx,
                                          &vm_area);
-                if (err)
+                if (err) {
                         goto clean_up;
+                }
 
                 va_allocated = false;
         }
@@ -941,8 +971,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                                                 comptags.lines - 1));
                                 gk20a_comptags_finish_clear(
                                         os_buf, err == 0);
-                                if (err)
+                                if (err) {
                                         goto clean_up;
+                                }
                         }
                 } else {
                         /*
@@ -955,8 +986,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                 /*
                  * Store the ctag offset for later use if we got the comptags
                  */
-                if (comptags.lines)
+                if (comptags.lines) {
                         ctag_offset = comptags.offset;
+                }
         }
 
         /*
@@ -984,8 +1016,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                 goto clean_up;
         }
 
-        if (clear_ctags)
+        if (clear_ctags) {
                 clear_ctags = gk20a_comptags_start_clear(os_buf);
+        }
 
         map_addr = g->ops.mm.gmmu_map(vm,
                                       map_addr,
@@ -1003,8 +1036,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
                                       batch,
                                       aperture);
 
-        if (clear_ctags)
+        if (clear_ctags) {
                 gk20a_comptags_finish_clear(os_buf, map_addr != 0);
+        }
 
         if (!map_addr) {
                 err = -ENOMEM;
@@ -1041,7 +1075,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
         return mapped_buffer;
 
 clean_up:
-        if (mapped_buffer->addr)
+        if (mapped_buffer->addr) {
                 g->ops.mm.gmmu_unmap(vm,
                                      mapped_buffer->addr,
                                      mapped_buffer->size,
@@ -1051,6 +1085,7 @@ clean_up:
                                      mapped_buffer->vm_area ?
                                      mapped_buffer->vm_area->sparse : false,
                                      NULL);
+        }
         nvgpu_mutex_release(&vm->update_gmmu_lock);
 clean_up_nolock:
         nvgpu_kfree(g, mapped_buffer);
@@ -1132,14 +1167,16 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
         nvgpu_timeout_init(vm->mm->g, &timeout, 50, NVGPU_TIMER_CPU_TIMER);
 
         do {
-                if (nvgpu_atomic_read(&mapped_buffer->ref.refcount) == 1)
+                if (nvgpu_atomic_read(&mapped_buffer->ref.refcount) == 1) {
                         break;
+                }
                 nvgpu_msleep(10);
         } while (!nvgpu_timeout_expired_msg(&timeout,
                         "sync-unmap failed on 0x%llx"));
 
-        if (nvgpu_timeout_expired(&timeout))
+        if (nvgpu_timeout_expired(&timeout)) {
                 ret = -ETIMEDOUT;
+        }
 
         nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
@@ -1154,16 +1191,18 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
         nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
         mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
-        if (!mapped_buffer)
+        if (!mapped_buffer) {
                 goto done;
+        }
 
         if (mapped_buffer->flags & NVGPU_VM_MAP_FIXED_OFFSET) {
-                if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer))
+                if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer)) {
                         /*
                          * Looks like we have failed... Better not continue in
                          * case the buffer is in use.
                          */
                         goto done;
+                }
         }
 
         /*