path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
author		Bharat Nihalani <bnihalani@nvidia.com>	2015-05-29 06:56:23 -0400
committer	Bharat Nihalani <bnihalani@nvidia.com>	2015-06-02 23:18:55 -0400
commit		1d8fdf56959240622073dd771dd9bfccf31b8f8e (patch)
tree		5c670e604825ddc25d6b6b0cce32cb3e7dc6871a /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent		38cee4d7effe5a2079a08b3c9a216b3197893959 (diff)
Revert "Revert "Revert "gpu: nvgpu: New allocator for VA space"""
This reverts commit ce1cf06b9a8eb6314ba0ca294e8cb430e1e141c0 since it causes
a GPU pbdma interrupt to be generated.

Bug 200106514

Change-Id: If3ed9a914c4e3e7f3f98c6609c6dbf57e1eb9aad
Signed-off-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-on: http://git-master/r/749291
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 855aac0d..94e4602f 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -243,9 +243,11 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm;
+	u32 num_small_pages, num_large_pages, low_hole_pages;
 	u64 small_vma_size, large_vma_size;
 	char name[32];
 	int err, i;
+	u32 start;
 
 	/* note: keep the page sizes sorted lowest to highest here */
 	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = {
@@ -292,27 +294,33 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	small_vma_size = (u64)16 << 30;
 	large_vma_size = vm->va_limit - small_vma_size;
 
+	num_small_pages = (u32)(small_vma_size >>
+			    ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
+
+	/* num_pages above is without regard to the low-side hole. */
+	low_hole_pages = (vm->va_start >>
+			    ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
+
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_small]>>10);
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
-			     vm, name,
-			     vm->va_start,
-			     small_vma_size - vm->va_start,
-			     SZ_4K,
-			     GPU_BALLOC_MAX_ORDER,
-			     GPU_BALLOC_GVA_SPACE);
+	err = gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
+			     name,
+			     low_hole_pages,		 /*start*/
+			     num_small_pages - low_hole_pages);/* length*/
 	if (err)
 		goto clean_up_share;
 
+	start = (u32)(small_vma_size >>
+		      ilog2(vm->gmmu_page_sizes[gmmu_page_size_big]));
+	num_large_pages = (u32)(large_vma_size >>
+			    ilog2(vm->gmmu_page_sizes[gmmu_page_size_big]));
+
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
 		 gmmu_page_sizes[gmmu_page_size_big]>>10);
-	err = __gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
-			     vm, name,
-			     small_vma_size,
-			     large_vma_size,
-			     big_page_size,
-			     GPU_BALLOC_MAX_ORDER,
-			     GPU_BALLOC_GVA_SPACE);
+	err = gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
+			     name,
+			     start,			/* start */
+			     num_large_pages);		/* length */
 	if (err)
 		goto clean_up_small_allocator;
 
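The reverted call sites size each VMA allocator in page indices rather than byte offsets: num_small_pages and low_hole_pages come from shifting the 16 GB small VMA and vm->va_start down by ilog2 of the small page size, and the big-page allocator starts at the first big-page index past the small VMA. The standalone sketch below reproduces only that arithmetic; the 4 KB small page, 128 KB big page, 64 KB low hole and 128 GB va_limit are assumed example values, not taken from this commit, and ilog2_u64() is a local stand-in for the kernel's ilog2(). The buddy-allocator calls removed by this revert instead took byte ranges plus a block size (SZ_4K or big_page_size), GPU_BALLOC_MAX_ORDER and GPU_BALLOC_GVA_SPACE.

/* Standalone sketch of the page-count arithmetic in the reverted
 * vgpu_vm_alloc_share(); the sizes below are assumed example values. */
#include <stdint.h>
#include <stdio.h>

/* integer log2 for power-of-two inputs, mirroring the kernel's ilog2() */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t small_page = 4096;              /* assumed small page size */
	uint64_t big_page = 128 << 10;           /* assumed big page size */
	uint64_t va_start = 64 << 10;            /* assumed low-side hole */
	uint64_t va_limit = (uint64_t)128 << 30; /* assumed VA space size */

	uint64_t small_vma_size = (uint64_t)16 << 30;        /* 16 GB, as in the diff */
	uint64_t large_vma_size = va_limit - small_vma_size;

	/* Small-page allocator: skip the low hole, cover the rest of the small VMA. */
	uint32_t num_small_pages = (uint32_t)(small_vma_size >> ilog2_u64(small_page));
	uint32_t low_hole_pages = (uint32_t)(va_start >> ilog2_u64(small_page));

	/* Big-page allocator: starts right after the small VMA, in big-page units. */
	uint32_t start = (uint32_t)(small_vma_size >> ilog2_u64(big_page));
	uint32_t num_large_pages = (uint32_t)(large_vma_size >> ilog2_u64(big_page));

	printf("small allocator: start=%u length=%u pages\n",
	       low_hole_pages, num_small_pages - low_hole_pages);
	printf("big allocator:   start=%u length=%u pages\n",
	       start, num_large_pages);
	return 0;
}

Built with a plain C compiler, the sketch prints start=16, length=4194288 small pages and start=131072, length=917504 big pages for these assumed sizes, matching the start/length argument pairs passed to gk20a_allocator_init() above.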