summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2018-07-02 20:14:27 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-09 20:20:43 -0400
commit3cf92ec89ba8deac77d726f02d79cba7c0e73e4d (patch)
tree9b20d30e81ab45d85eddbdf1cdd36901fd5a4ad2 /drivers/gpu/nvgpu/common/mm
parent2dd9bb03dd56ca86b0e61b89fab38d38a58ecddf (diff)
gpu: nvgpu: Fix several issues with the buddy allocator
The issues are: 1. Non-fixed allocs must take into account explicit PTE size requests. Previously the PTE size was determined from the allocation size, which was incorrect. To do this, the PTE size is now plumbed through all GPU VA allocations. This is what the new alloc_pte() op does. 2. Fix buddy PTE size assignment. This changes a '<=' into a '<' in the buddy allocation logic. Effectively this is now leaving the PTE size for buddy blocks equal to the PDE block size as 'ANY'. This prevents a buddy block of PDE size which has yet to be allocated from having a specific PDE size. Without this it's possible to do a fixed alloc that fails unexpectedly due to mismatching PDE sizes. Consider two PDE block sized fixed allocs that are contained in one buddy twice the size of a PDE block. Let's call these fixed allocs S and B (small and big). Let's assume that two fixed allocs are done, each targeting S and B, in that order. With the current logic the first alloc, when we create the two buddies S and B, causes both S and B to have a PTE size of SMALL. Now when the second alloc happens we attempt to find a buddy B with a PTE size of either BIG or ANY. But we cannot because B already has size SMALL. This causes us to appear like we have a conflicting fixed alloc despite this not being the case. 3. Misc cleanups & bug fixes: - Clean up some MISRA issues - Delete an extraneous unlock that could have caused a deadlock. Bug 200105199 Change-Id: Ib5447ec6705a5a289ac0cf3d5e90c79b5d67582d Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1768582 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--drivers/gpu/nvgpu/common/mm/buddy_allocator.c108
-rw-r--r--drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h18
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c5
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c2
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm_area.c24
5 files changed, 106 insertions, 51 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index a9f90069..b29045ba 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -64,6 +64,25 @@ static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
64 * easily PDE aligned so this hasn't been a problem. 64 * easily PDE aligned so this hasn't been a problem.
65 */ 65 */
66 66
67static u32 nvgpu_balloc_page_size_to_pte_size(struct nvgpu_buddy_allocator *a,
68 u32 page_size)
69{
70 if ((a->flags & GPU_ALLOC_GVA_SPACE) == 0ULL) {
71 return BALLOC_PTE_SIZE_ANY;
72 }
73
74 /*
75 * Make sure the page size is actually valid!
76 */
77 if (page_size == a->vm->big_page_size) {
78 return BALLOC_PTE_SIZE_BIG;
79 } else if (page_size == SZ_4K) {
80 return BALLOC_PTE_SIZE_SMALL;
81 } else {
82 return BALLOC_PTE_SIZE_INVALID;
83 }
84}
85
67/* 86/*
68 * Pick a suitable maximum order for this allocator. 87 * Pick a suitable maximum order for this allocator.
69 * 88 *
@@ -142,7 +161,7 @@ static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
142 * without cycling through the entire list. 161 * without cycling through the entire list.
143 */ 162 */
144 if (a->flags & GPU_ALLOC_GVA_SPACE && 163 if (a->flags & GPU_ALLOC_GVA_SPACE &&
145 b->pte_size == GMMU_PAGE_SIZE_BIG) { 164 b->pte_size == BALLOC_PTE_SIZE_BIG) {
146 nvgpu_list_add_tail(&b->buddy_entry, list); 165 nvgpu_list_add_tail(&b->buddy_entry, list);
147 } else { 166 } else {
148 nvgpu_list_add(&b->buddy_entry, list); 167 nvgpu_list_add(&b->buddy_entry, list);
@@ -387,7 +406,7 @@ static void balloc_coalesce(struct nvgpu_buddy_allocator *a,
387 * @a must be locked. 406 * @a must be locked.
388 */ 407 */
389static int balloc_split_buddy(struct nvgpu_buddy_allocator *a, 408static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
390 struct nvgpu_buddy *b, int pte_size) 409 struct nvgpu_buddy *b, u32 pte_size)
391{ 410{
392 struct nvgpu_buddy *left, *right; 411 struct nvgpu_buddy *left, *right;
393 u64 half; 412 u64 half;
@@ -415,9 +434,22 @@ static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
415 left->parent = b; 434 left->parent = b;
416 right->parent = b; 435 right->parent = b;
417 436
418 /* PTE considerations. */ 437 /*
438 * Potentially assign a PTE size to the new buddies. The obvious case is
439 * when we don't have a GPU VA space; just leave it alone. When we do
440 * have a GVA space we need to assign the passed PTE size to the buddy
441 * only if the buddy is less than the PDE block size. This is because if
442 * the buddy is less than the PDE block size then the buddy's parent
443 * may already have a PTE size. Thus we can only allocate this buddy to
444 * mappings with that PTE size (due to the large/small PTE separation
445 * requirement).
446 *
447 * When the buddy size is greater than or equal to the block size then
448 * we can leave the buddies PTE field alone since the PDE block has yet
449 * to be assigned a PTE size.
450 */
419 if (a->flags & GPU_ALLOC_GVA_SPACE && 451 if (a->flags & GPU_ALLOC_GVA_SPACE &&
420 left->order <= a->pte_blk_order) { 452 left->order < a->pte_blk_order) {
421 left->pte_size = pte_size; 453 left->pte_size = pte_size;
422 right->pte_size = pte_size; 454 right->pte_size = pte_size;
423 } 455 }
@@ -477,7 +509,7 @@ static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a,
477 * Find a suitable buddy for the given order and PTE type (big or little). 509 * Find a suitable buddy for the given order and PTE type (big or little).
478 */ 510 */
479static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a, 511static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
480 u64 order, int pte_size) 512 u64 order, u32 pte_size)
481{ 513{
482 struct nvgpu_buddy *bud; 514 struct nvgpu_buddy *bud;
483 515
@@ -487,7 +519,7 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
487 } 519 }
488 520
489 if (a->flags & GPU_ALLOC_GVA_SPACE && 521 if (a->flags & GPU_ALLOC_GVA_SPACE &&
490 pte_size == GMMU_PAGE_SIZE_BIG) { 522 pte_size == BALLOC_PTE_SIZE_BIG) {
491 bud = nvgpu_list_last_entry(balloc_get_order_list(a, order), 523 bud = nvgpu_list_last_entry(balloc_get_order_list(a, order),
492 nvgpu_buddy, buddy_entry); 524 nvgpu_buddy, buddy_entry);
493 } else { 525 } else {
@@ -514,7 +546,7 @@ static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
514 * @a must be locked. 546 * @a must be locked.
515 */ 547 */
516static u64 __balloc_do_alloc(struct nvgpu_buddy_allocator *a, 548static u64 __balloc_do_alloc(struct nvgpu_buddy_allocator *a,
517 u64 order, int pte_size) 549 u64 order, u32 pte_size)
518{ 550{
519 u64 split_order; 551 u64 split_order;
520 struct nvgpu_buddy *bud = NULL; 552 struct nvgpu_buddy *bud = NULL;
@@ -637,7 +669,7 @@ static void __balloc_get_parent_range(struct nvgpu_buddy_allocator *a,
637 * necessary for this buddy to exist as well. 669 * necessary for this buddy to exist as well.
638 */ 670 */
639static struct nvgpu_buddy *__balloc_make_fixed_buddy( 671static struct nvgpu_buddy *__balloc_make_fixed_buddy(
640 struct nvgpu_buddy_allocator *a, u64 base, u64 order, int pte_size) 672 struct nvgpu_buddy_allocator *a, u64 base, u64 order, u32 pte_size)
641{ 673{
642 struct nvgpu_buddy *bud = NULL; 674 struct nvgpu_buddy *bud = NULL;
643 struct nvgpu_list_node *order_list; 675 struct nvgpu_list_node *order_list;
@@ -714,11 +746,19 @@ static struct nvgpu_buddy *__balloc_make_fixed_buddy(
714 746
715static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a, 747static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
716 struct nvgpu_fixed_alloc *falloc, 748 struct nvgpu_fixed_alloc *falloc,
717 u64 base, u64 len, int pte_size) 749 u64 base, u64 len, u32 pte_size)
718{ 750{
719 u64 shifted_base, inc_base; 751 u64 shifted_base, inc_base;
720 u64 align_order; 752 u64 align_order;
721 753
754 /*
755 * Ensure that we have a valid PTE size here (ANY is a valid size). If
756 * this is INVALID then we are going to experience imminent corruption
757 * in the lists that hold buddies. This leads to some very strange
758 * crashes.
759 */
760 BUG_ON(pte_size == BALLOC_PTE_SIZE_INVALID);
761
722 shifted_base = balloc_base_shift(a, base); 762 shifted_base = balloc_base_shift(a, base);
723 if (shifted_base == 0U) { 763 if (shifted_base == 0U) {
724 align_order = __fls(len >> a->blk_shift); 764 align_order = __fls(len >> a->blk_shift);
@@ -814,10 +854,11 @@ static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
814/* 854/*
815 * Allocate memory from the passed allocator. 855 * Allocate memory from the passed allocator.
816 */ 856 */
817static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *na, u64 len) 857static u64 nvgpu_buddy_balloc_pte(struct nvgpu_allocator *na, u64 len,
858 u32 page_size)
818{ 859{
819 u64 order, addr; 860 u64 order, addr;
820 int pte_size; 861 u32 pte_size;
821 struct nvgpu_buddy_allocator *a = na->priv; 862 struct nvgpu_buddy_allocator *a = na->priv;
822 863
823 alloc_lock(na); 864 alloc_lock(na);
@@ -830,22 +871,21 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *na, u64 len)
830 return 0; 871 return 0;
831 } 872 }
832 873
833 if (a->flags & GPU_ALLOC_GVA_SPACE) { 874 pte_size = nvgpu_balloc_page_size_to_pte_size(a, page_size);
834 pte_size = __get_pte_size(a->vm, 0, len); 875 if (pte_size == BALLOC_PTE_SIZE_INVALID) {
835 } else { 876 return 0ULL;
836 pte_size = BALLOC_PTE_SIZE_ANY;
837 } 877 }
838 878
839 addr = __balloc_do_alloc(a, order, pte_size); 879 addr = __balloc_do_alloc(a, order, pte_size);
840 880
841 if (addr) { 881 if (addr != 0ULL) {
842 a->bytes_alloced += len; 882 a->bytes_alloced += len;
843 a->bytes_alloced_real += balloc_order_to_len(a, order); 883 a->bytes_alloced_real += balloc_order_to_len(a, order);
844 alloc_dbg(balloc_owner(a), 884 alloc_dbg(balloc_owner(a),
845 "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s", 885 "Alloc 0x%-10llx %3lld:0x%-10llx pte_size=%s",
846 addr, order, len, 886 addr, order, len,
847 pte_size == GMMU_PAGE_SIZE_BIG ? "big" : 887 pte_size == BALLOC_PTE_SIZE_BIG ? "big" :
848 pte_size == GMMU_PAGE_SIZE_SMALL ? "small" : 888 pte_size == BALLOC_PTE_SIZE_SMALL ? "small" :
849 "NA/any"); 889 "NA/any");
850 } else { 890 } else {
851 alloc_dbg(balloc_owner(a), "Alloc failed: no mem!"); 891 alloc_dbg(balloc_owner(a), "Alloc failed: no mem!");
@@ -858,13 +898,15 @@ static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *na, u64 len)
858 return addr; 898 return addr;
859} 899}
860 900
861/* 901static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *na, u64 len)
862 * Requires @na to be locked. 902{
863 */ 903 return nvgpu_buddy_balloc_pte(na, len, BALLOC_PTE_SIZE_ANY);
904}
905
864static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na, 906static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
865 u64 base, u64 len, u32 page_size) 907 u64 base, u64 len, u32 page_size)
866{ 908{
867 int pte_size = BALLOC_PTE_SIZE_ANY; 909 u32 pte_size;
868 u64 ret, real_bytes = 0; 910 u64 ret, real_bytes = 0;
869 struct nvgpu_buddy *bud; 911 struct nvgpu_buddy *bud;
870 struct nvgpu_fixed_alloc *falloc = NULL; 912 struct nvgpu_fixed_alloc *falloc = NULL;
@@ -879,15 +921,9 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
879 goto fail; 921 goto fail;
880 } 922 }
881 923
882 /* Check that the page size is valid. */ 924 pte_size = nvgpu_balloc_page_size_to_pte_size(a, page_size);
883 if (a->flags & GPU_ALLOC_GVA_SPACE && a->vm->big_pages) { 925 if (pte_size == BALLOC_PTE_SIZE_INVALID) {
884 if (page_size == a->vm->big_page_size) { 926 goto fail;
885 pte_size = GMMU_PAGE_SIZE_BIG;
886 } else if (page_size == SZ_4K) {
887 pte_size = GMMU_PAGE_SIZE_SMALL;
888 } else {
889 goto fail;
890 }
891 } 927 }
892 928
893 falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc)); 929 falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(na), sizeof(*falloc));
@@ -903,7 +939,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
903 alloc_dbg(balloc_owner(a), 939 alloc_dbg(balloc_owner(a),
904 "Range not free: 0x%llx -> 0x%llx", 940 "Range not free: 0x%llx -> 0x%llx",
905 base, base + len); 941 base, base + len);
906 goto fail_unlock; 942 goto fail;
907 } 943 }
908 944
909 ret = __balloc_do_alloc_fixed(a, falloc, base, len, pte_size); 945 ret = __balloc_do_alloc_fixed(a, falloc, base, len, pte_size);
@@ -911,7 +947,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
911 alloc_dbg(balloc_owner(a), 947 alloc_dbg(balloc_owner(a),
912 "Alloc-fixed failed ?? 0x%llx -> 0x%llx", 948 "Alloc-fixed failed ?? 0x%llx -> 0x%llx",
913 base, base + len); 949 base, base + len);
914 goto fail_unlock; 950 goto fail;
915 } 951 }
916 952
917 balloc_alloc_fixed(a, falloc); 953 balloc_alloc_fixed(a, falloc);
@@ -928,8 +964,6 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *na,
928 964
929 return base; 965 return base;
930 966
931fail_unlock:
932 alloc_unlock(na);
933fail: 967fail:
934 nvgpu_kfree(nvgpu_alloc_to_gpu(na), falloc); 968 nvgpu_kfree(nvgpu_alloc_to_gpu(na), falloc);
935 return 0; 969 return 0;
@@ -1051,7 +1085,8 @@ static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *na,
1051 } 1085 }
1052 1086
1053 /* Should not be possible to fail... */ 1087 /* Should not be possible to fail... */
1054 addr = __nvgpu_balloc_fixed_buddy(na, co->base, co->length, 0); 1088 addr = __nvgpu_balloc_fixed_buddy(na, co->base, co->length,
1089 BALLOC_PTE_SIZE_ANY);
1055 if (!addr) { 1090 if (!addr) {
1056 err = -ENOMEM; 1091 err = -ENOMEM;
1057 nvgpu_warn(na->g, 1092 nvgpu_warn(na->g,
@@ -1206,6 +1241,7 @@ static void nvgpu_buddy_print_stats(struct nvgpu_allocator *na,
1206 1241
1207static const struct nvgpu_allocator_ops buddy_ops = { 1242static const struct nvgpu_allocator_ops buddy_ops = {
1208 .alloc = nvgpu_buddy_balloc, 1243 .alloc = nvgpu_buddy_balloc,
1244 .alloc_pte = nvgpu_buddy_balloc_pte,
1209 .free = nvgpu_buddy_bfree, 1245 .free = nvgpu_buddy_bfree,
1210 1246
1211 .alloc_fixed = nvgpu_balloc_fixed_buddy, 1247 .alloc_fixed = nvgpu_balloc_fixed_buddy,
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index fe3926b9..a90530b6 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -46,18 +46,20 @@ struct nvgpu_buddy {
46 u64 end; /* End address of this buddy. */ 46 u64 end; /* End address of this buddy. */
47 u64 order; /* Buddy order. */ 47 u64 order; /* Buddy order. */
48 48
49#define BALLOC_BUDDY_ALLOCED 0x1 49#define BALLOC_BUDDY_ALLOCED 0x1U
50#define BALLOC_BUDDY_SPLIT 0x2 50#define BALLOC_BUDDY_SPLIT 0x2U
51#define BALLOC_BUDDY_IN_LIST 0x4 51#define BALLOC_BUDDY_IN_LIST 0x4U
52 int flags; /* List of associated flags. */ 52 u32 flags; /* List of associated flags. */
53 53
54 /* 54 /*
55 * Size of the PDE this buddy is using. This allows for grouping like 55 * Size of the PDE this buddy is using. This allows for grouping like
56 * sized allocations into the same PDE. This uses the gmmu_pgsz_gk20a 56 * sized allocations into the same PDE.
57 * enum except for the BALLOC_PTE_SIZE_ANY specifier.
58 */ 57 */
59#define BALLOC_PTE_SIZE_ANY -1 58#define BALLOC_PTE_SIZE_ANY (~0U)
60 int pte_size; 59#define BALLOC_PTE_SIZE_INVALID 0U
60#define BALLOC_PTE_SIZE_SMALL 1U
61#define BALLOC_PTE_SIZE_BIG 2U
62 u32 pte_size;
61}; 63};
62 64
63static inline struct nvgpu_buddy * 65static inline struct nvgpu_buddy *
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index 4057a599..ec0aa888 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -77,6 +77,11 @@ u64 nvgpu_alloc(struct nvgpu_allocator *a, u64 len)
77 return a->ops->alloc(a, len); 77 return a->ops->alloc(a, len);
78} 78}
79 79
80u64 nvgpu_alloc_pte(struct nvgpu_allocator *a, u64 len, u32 page_size)
81{
82 return a->ops->alloc_pte(a, len, page_size);
83}
84
80void nvgpu_free(struct nvgpu_allocator *a, u64 addr) 85void nvgpu_free(struct nvgpu_allocator *a, u64 addr)
81{ 86{
82 a->ops->free(a, addr); 87 a->ops->free(a, addr);
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 3cb8ed60..57d9afb5 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -152,7 +152,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
152 /* Be certain we round up to page_size if needed */ 152 /* Be certain we round up to page_size if needed */
153 size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U); 153 size = (size + ((u64)page_size - 1U)) & ~((u64)page_size - 1U);
154 154
155 addr = nvgpu_alloc(vma, size); 155 addr = nvgpu_alloc_pte(vma, size, page_size);
156 if (!addr) { 156 if (!addr) {
157 nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size); 157 nvgpu_err(g, "(%s) oom: sz=0x%llx", vma->name, size);
158 return 0; 158 return 0;
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index c2c0d569..d096de5d 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -99,11 +99,22 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
99 struct nvgpu_allocator *vma; 99 struct nvgpu_allocator *vma;
100 struct nvgpu_vm_area *vm_area; 100 struct nvgpu_vm_area *vm_area;
101 u64 vaddr_start = 0; 101 u64 vaddr_start = 0;
102 u64 our_addr = *addr;
102 u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL; 103 u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
103 104
105 /*
106 * If we have a fixed address then use the passed address in *addr. This
107 * corresponds to the o_a field in the IOCTL. But since we do not
108 * support specific alignments in the buddy allocator we ignore the
109 * field if it isn't a fixed offset.
110 */
111 if ((flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) != 0U) {
112 our_addr = *addr;
113 }
114
104 nvgpu_log(g, gpu_dbg_map, 115 nvgpu_log(g, gpu_dbg_map,
105 "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x", 116 "ADD vm_area: pgsz=%#-8x pages=%-9u a/o=%#-14llx flags=0x%x",
106 page_size, pages, *addr, flags); 117 page_size, pages, our_addr, flags);
107 118
108 for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) { 119 for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
109 if (vm->gmmu_page_sizes[pgsz_idx] == page_size) { 120 if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
@@ -133,14 +144,15 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
133 144
134 vma = vm->vma[pgsz_idx]; 145 vma = vm->vma[pgsz_idx];
135 if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) { 146 if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
136 vaddr_start = nvgpu_alloc_fixed(vma, *addr, 147 vaddr_start = nvgpu_alloc_fixed(vma, our_addr,
137 (u64)pages * 148 (u64)pages *
138 (u64)page_size, 149 (u64)page_size,
139 page_size); 150 page_size);
140 } else { 151 } else {
141 vaddr_start = nvgpu_alloc(vma, 152 vaddr_start = nvgpu_alloc_pte(vma,
142 (u64)pages * 153 (u64)pages *
143 (u64)page_size); 154 (u64)page_size,
155 page_size);
144 } 156 }
145 157
146 if (!vaddr_start) { 158 if (!vaddr_start) {