author     Srirangan <smadhavan@nvidia.com>	2018-08-14 01:27:15 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-17 16:54:08 -0400
commit     70c20bb75be7815ebc67ac82d6999f46bc25ed6d
tree       19d6b5299e09b71e9afe2967a758f036bb9b79bc	/drivers/gpu/nvgpu/common/mm/gmmu.c
parent     553fdf3534f856edce73744fd54914b9b7a829cc
gpu: nvgpu: common: mm: Fix MISRA 15.6 violations

MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement if
blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Ieeecf719dca9acc1a116d2893637bf770caf4f5b
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1794241
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
 -rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c | 98
 1 file changed, 64 insertions(+), 34 deletions(-)
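The rule being enforced is mechanical: every if and else body, even a single statement, must be a compound (braced) block. The short C sketch below illustrates the before/after shape of the change repeated throughout the diff; the helper name and values are hypothetical and are not taken from gmmu.c.

#include <stdio.h>

/* Hypothetical helper illustrating MISRA C:2012 Rule 15.6.
 *
 * Non-compliant form (single-statement bodies without braces):
 *
 *     if (flag)
 *         return 1;
 *     else
 *         return 0;
 */
static int check_flag(int flag)
{
	/* Compliant form: each body is enclosed in braces. */
	if (flag) {
		return 1;
	} else {
		return 0;
	}
}

int main(void)
{
	printf("check_flag(5) = %d\n", check_flag(5));	/* prints 1 */
	printf("check_flag(0) = %d\n", check_flag(0));	/* prints 0 */
	return 0;
}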
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index f6f2b9ad..498cdf06 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -38,18 +38,20 @@
 
 #define __gmmu_dbg(g, attrs, fmt, args...)			\
 	do {							\
-		if (attrs->debug)				\
+		if (attrs->debug) {				\
 			nvgpu_info(g, fmt, ##args);		\
-		else						\
+		} else {					\
 			nvgpu_log(g, gpu_dbg_map, fmt, ##args);	\
+		}						\
 	} while (0)
 
 #define __gmmu_dbg_v(g, attrs, fmt, args...)			\
 	do {							\
-		if (attrs->debug)				\
+		if (attrs->debug) {				\
 			nvgpu_info(g, fmt, ##args);		\
-		else						\
+		} else {					\
 			nvgpu_log(g, gpu_dbg_map_v, fmt, ##args); \
+		}						\
 	} while (0)
 
 static int pd_allocate(struct vm_gk20a *vm,
@@ -77,15 +79,17 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 
 	struct nvgpu_sgt *sgt = nvgpu_sgt_create_from_mem(g, mem);
 
-	if (!sgt)
+	if (!sgt) {
 		return -ENOMEM;
+	}
 
 	/*
 	 * If the GPU is IO coherent and the DMA API is giving us IO coherent
 	 * CPU mappings then we gotta make sure we use the IO coherent aperture.
 	 */
-	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
+	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
 		flags |= NVGPU_VM_MAP_IO_COHERENT;
+	}
 
 	/*
 	 * Later on, when we free this nvgpu_mem's GPU mapping, we are going to
@@ -94,10 +98,11 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
 	 * therefor we should not try and free it. But otherwise, if we do
 	 * manage the VA alloc, we obviously must free it.
 	 */
-	if (addr != 0)
+	if (addr != 0) {
 		mem->free_gpu_va = false;
-	else
+	} else {
 		mem->free_gpu_va = true;
+	}
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	vaddr = g->ops.mm.gmmu_map(vm, addr,
@@ -196,8 +201,9 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 	pdb_size = ALIGN(pd_size(&vm->mmu_levels[0], &attrs), PAGE_SIZE);
 
 	err = __nvgpu_pd_cache_alloc_direct(vm->mm->g, &vm->pdb, pdb_size);
-	if (WARN_ON(err))
+	if (WARN_ON(err)) {
 		return err;
+	}
 
 	/*
 	 * One nvgpu_mb() is done after all mapping operations. Don't need
@@ -267,8 +273,9 @@ static int pd_allocate(struct vm_gk20a *vm,
 {
 	int err;
 
-	if (pd->mem)
+	if (pd->mem) {
 		return 0;
+	}
 
 	err = __nvgpu_pd_alloc(vm, pd, pd_size(l, attrs));
 	if (err) {
@@ -310,14 +317,16 @@ static int pd_allocate_children(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
-	if (pd->entries)
+	if (pd->entries) {
 		return 0;
+	}
 
 	pd->num_entries = pd_entries(l, attrs);
 	pd->entries = nvgpu_vzalloc(g, sizeof(struct nvgpu_gmmu_pd) *
 			pd->num_entries);
-	if (!pd->entries)
+	if (!pd->entries) {
 		return -ENOMEM;
+	}
 
 	return 0;
 }
@@ -398,8 +407,9 @@ static int __set_pd_level(struct vm_gk20a *vm,
 	 * have a bunch of children PDs.
 	 */
 	if (next_l->update_entry) {
-		if (pd_allocate_children(vm, l, pd, attrs))
+		if (pd_allocate_children(vm, l, pd, attrs)) {
 			return -ENOMEM;
+		}
 
 		/*
 		 * Get the next PD so that we know what to put in this
@@ -412,8 +422,9 @@ static int __set_pd_level(struct vm_gk20a *vm,
 		/*
 		 * Allocate the backing memory for next_pd.
 		 */
-		if (pd_allocate(vm, next_pd, next_l, attrs))
+		if (pd_allocate(vm, next_pd, next_l, attrs)) {
 			return -ENOMEM;
+		}
 	}
 
 	/*
@@ -440,8 +451,9 @@ static int __set_pd_level(struct vm_gk20a *vm,
 				      chunk_size,
 				      attrs);
 
-		if (err)
+		if (err) {
 			return err;
+		}
 	}
 
 	virt_addr += chunk_size;
@@ -452,8 +464,9 @@ static int __set_pd_level(struct vm_gk20a *vm,
 		 * non-zero phys addresses in the PTEs. A non-zero phys-addr
 		 * would also confuse the lower level PTE programming code.
 		 */
-		if (phys_addr)
+		if (phys_addr) {
 			phys_addr += chunk_size;
+		}
 		length -= chunk_size;
 	}
 
@@ -547,8 +560,9 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 				      virt_addr,
 				      chunk_length,
 				      attrs);
-		if (err)
+		if (err) {
 			break;
+		}
 
 		/* Space has been skipped so zero this for future chunks. */
 		space_to_skip = 0;
@@ -559,8 +573,9 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 		virt_addr += chunk_length;
 		length -= chunk_length;
 
-		if (length == 0)
+		if (length == 0) {
 			break;
+		}
 	}
 
 	return err;
@@ -594,13 +609,15 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 	/* note: here we need to map kernel to small, since the
 	 * low-level mmu code assumes 0 is small and 1 is big pages */
-	if (attrs->pgsz == gmmu_page_size_kernel)
+	if (attrs->pgsz == gmmu_page_size_kernel) {
 		attrs->pgsz = gmmu_page_size_small;
+	}
 
 	page_size = vm->gmmu_page_sizes[attrs->pgsz];
 
-	if (space_to_skip & (page_size - 1))
+	if (space_to_skip & (page_size - 1)) {
 		return -EINVAL;
+	}
 
 	/*
 	 * Update length to be aligned to the passed page size.
@@ -692,8 +709,9 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * the programmed ctagline gets increased at compression_page_size
 	 * boundaries.
 	 */
-	if (attrs.ctag)
+	if (attrs.ctag) {
 		attrs.ctag += buffer_offset & (ctag_granularity - 1U);
+	}
 
 	attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
 
@@ -701,8 +719,9 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * Handle the IO coherency aperture: make sure the .aperture field is
 	 * correct based on the IO coherency flag.
 	 */
-	if (attrs.coherent && attrs.aperture == APERTURE_SYSMEM)
+	if (attrs.coherent && attrs.aperture == APERTURE_SYSMEM) {
 		attrs.aperture = __APERTURE_SYSMEM_COH;
+	}
 
 	/*
 	 * Only allocate a new GPU VA range if we haven't already been passed a
@@ -725,16 +744,18 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		goto fail_validate;
 	}
 
-	if (!batch)
+	if (!batch) {
 		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
-	else
+	} else {
 		batch->need_tlb_invalidate = true;
+	}
 
 	return vaddr;
 
 fail_validate:
-	if (allocated)
+	if (allocated) {
 		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+	}
 fail_alloc:
 	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
 	return 0;
@@ -775,8 +796,9 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	/* unmap here needs to know the page size we assigned at mapping */
 	err = __nvgpu_gmmu_update_page_table(vm, NULL, 0,
 					     vaddr, size, &attrs);
-	if (err)
+	if (err) {
 		nvgpu_err(g, "failed to update gmmu ptes on unmap");
+	}
 
 	if (!batch) {
 		gk20a_mm_l2_flush(g, true);
@@ -801,8 +823,9 @@ u32 __nvgpu_pte_words(struct gk20a *g)
 	 */
 	do {
 		next_l = l + 1;
-		if (!next_l->update_entry)
+		if (!next_l->update_entry) {
 			break;
+		}
 
 		l++;
 	} while (true);
@@ -836,13 +859,15 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 		struct nvgpu_gmmu_pd *pd_next = pd->entries + pd_idx;
 
 		/* Invalid entry! */
-		if (!pd_next->mem)
+		if (!pd_next->mem) {
 			return -EINVAL;
+		}
 
 		attrs->pgsz = l->get_pgsz(g, l, pd, pd_idx);
 
-		if (attrs->pgsz >= gmmu_nr_page_sizes)
+		if (attrs->pgsz >= gmmu_nr_page_sizes) {
 			return -EINVAL;
+		}
 
 		return __nvgpu_locate_pte(g, vm, pd_next,
 					  vaddr, lvl + 1, attrs,
@@ -850,8 +875,9 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 					  pd_offs_out);
 	}
 
-	if (!pd->mem)
+	if (!pd->mem) {
 		return -EINVAL;
+	}
 
 	/*
 	 * Take into account the real offset into the nvgpu_mem since the PD
@@ -867,14 +893,17 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm,
 		}
 	}
 
-	if (pd_out)
+	if (pd_out) {
 		*pd_out = pd;
+	}
 
-	if (pd_idx_out)
+	if (pd_idx_out) {
 		*pd_idx_out = pd_idx;
+	}
 
-	if (pd_offs_out)
+	if (pd_offs_out) {
 		*pd_offs_out = pd_offset_from_index(l, pd_idx);
+	}
 
 	return 0;
 }
@@ -903,8 +932,9 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 	err = __nvgpu_locate_pte(g, vm, &vm->pdb,
 				 vaddr, 0, &attrs,
 				 NULL, &pd, &pd_idx, &pd_offs);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	pte_size = __nvgpu_pte_words(g);
 