summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm_area.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm_area.c  24
1 files changed, 18 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index c2c0d569..d096de5d 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -99,11 +99,22 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
99 struct nvgpu_allocator *vma; 99 struct nvgpu_allocator *vma;
100 struct nvgpu_vm_area *vm_area; 100 struct nvgpu_vm_area *vm_area;
101 u64 vaddr_start = 0; 101 u64 vaddr_start = 0;
102 u64 our_addr = *addr;
102 u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL; 103 u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL;
103 104
105 /*
106 * If we have a fixed address then use the passed address in *addr. This
107 * corresponds to the o_a field in the IOCTL. But since we do not
108 * support specific alignments in the buddy allocator we ignore the
109 * field if it isn't a fixed offset.
110 */
111 if ((flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) != 0U) {
112 our_addr = *addr;
113 }
114
104 nvgpu_log(g, gpu_dbg_map, 115 nvgpu_log(g, gpu_dbg_map,
105 "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x", 116 "ADD vm_area: pgsz=%#-8x pages=%-9u a/o=%#-14llx flags=0x%x",
106 page_size, pages, *addr, flags); 117 page_size, pages, our_addr, flags);
107 118
108 for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) { 119 for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) {
109 if (vm->gmmu_page_sizes[pgsz_idx] == page_size) { 120 if (vm->gmmu_page_sizes[pgsz_idx] == page_size) {
@@ -133,14 +144,15 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
133 144
134 vma = vm->vma[pgsz_idx]; 145 vma = vm->vma[pgsz_idx];
135 if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) { 146 if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) {
136 vaddr_start = nvgpu_alloc_fixed(vma, *addr, 147 vaddr_start = nvgpu_alloc_fixed(vma, our_addr,
137 (u64)pages * 148 (u64)pages *
138 (u64)page_size, 149 (u64)page_size,
139 page_size); 150 page_size);
140 } else { 151 } else {
141 vaddr_start = nvgpu_alloc(vma, 152 vaddr_start = nvgpu_alloc_pte(vma,
142 (u64)pages * 153 (u64)pages *
143 (u64)page_size); 154 (u64)page_size,
155 page_size);
144 } 156 }
145 157
146 if (!vaddr_start) { 158 if (!vaddr_start) {