summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2015-10-09 14:53:19 -0400
committerTerje Bergstrom <tbergstrom@nvidia.com>2015-10-22 12:27:30 -0400
commit37255d42cc1eee1dc1de94bd651461a46c8afbe9 (patch)
treefa78d6b0e7caa739474d1a7dc82f6412148ad542 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parentfb3a1d31cd84771b659050ea1aa920bdf06ffb04 (diff)
gpu: nvgpu: vgpu: Alloc kernel address space
JIRA VFND-890 Change-Id: I8eba041b663cead94f2cc3d75d6458d472f1a755 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: http://git-master/r/815378 (cherry picked from commit 4b52329e955758ec4368abcb463ce4e3a2653237) Reviewed-on: http://git-master/r/820499
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gr_vgpu.c19
1 file changed, 13 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index b259a0c3..a4ec5254 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -144,7 +144,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
144 144
145 /* Circular Buffer */ 145 /* Circular Buffer */
146 gpu_va = gk20a_vm_alloc_va(ch_vm, 146 gpu_va = gk20a_vm_alloc_va(ch_vm,
147 gr->global_ctx_buffer[CIRCULAR].mem.size, 0); 147 gr->global_ctx_buffer[CIRCULAR].mem.size,
148 gmmu_page_size_kernel);
148 149
149 if (!gpu_va) 150 if (!gpu_va)
150 goto clean_up; 151 goto clean_up;
@@ -153,7 +154,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
153 154
154 /* Attribute Buffer */ 155 /* Attribute Buffer */
155 gpu_va = gk20a_vm_alloc_va(ch_vm, 156 gpu_va = gk20a_vm_alloc_va(ch_vm,
156 gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0); 157 gr->global_ctx_buffer[ATTRIBUTE].mem.size,
158 gmmu_page_size_kernel);
157 159
158 if (!gpu_va) 160 if (!gpu_va)
159 goto clean_up; 161 goto clean_up;
@@ -162,7 +164,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
162 164
163 /* Page Pool */ 165 /* Page Pool */
164 gpu_va = gk20a_vm_alloc_va(ch_vm, 166 gpu_va = gk20a_vm_alloc_va(ch_vm,
165 gr->global_ctx_buffer[PAGEPOOL].mem.size, 0); 167 gr->global_ctx_buffer[PAGEPOOL].mem.size,
168 gmmu_page_size_kernel);
166 if (!gpu_va) 169 if (!gpu_va)
167 goto clean_up; 170 goto clean_up;
168 g_bfr_va[PAGEPOOL_VA] = gpu_va; 171 g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -170,7 +173,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
170 173
171 /* Priv register Access Map */ 174 /* Priv register Access Map */
172 gpu_va = gk20a_vm_alloc_va(ch_vm, 175 gpu_va = gk20a_vm_alloc_va(ch_vm,
173 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0); 176 gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
177 gmmu_page_size_kernel);
174 if (!gpu_va) 178 if (!gpu_va)
175 goto clean_up; 179 goto clean_up;
176 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va; 180 g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
@@ -257,7 +261,9 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
257 gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size; 261 gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
258 262
259 gr_ctx->mem.size = gr->ctx_vars.buffer_total_size; 263 gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
260 gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm, gr_ctx->mem.size, 0); 264 gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(vm,
265 gr_ctx->mem.size,
266 gmmu_page_size_kernel);
261 267
262 if (!gr_ctx->mem.gpu_va) { 268 if (!gr_ctx->mem.gpu_va) {
263 kfree(gr_ctx); 269 kfree(gr_ctx);
@@ -351,7 +357,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
351 357
352 patch_ctx->mem.size = 128 * sizeof(u32); 358 patch_ctx->mem.size = 128 * sizeof(u32);
353 patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, 359 patch_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm,
354 patch_ctx->mem.size, 0); 360 patch_ctx->mem.size,
361 gmmu_page_size_kernel);
355 if (!patch_ctx->mem.gpu_va) 362 if (!patch_ctx->mem.gpu_va)
356 return -ENOMEM; 363 return -ENOMEM;
357 364