summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/as_gk20a.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2016-06-24 17:12:24 -0400
committerAlex Waterman <alexw@nvidia.com>2016-07-19 14:21:46 -0400
commitb6569319c772d84087a0a1a6d7146bdcae8e9aab (patch)
tree16e7bae422279925301d9116b1e7f4d8aa656483 /drivers/gpu/nvgpu/gk20a/as_gk20a.c
parentf4b77e465648e87b19a7df4bb2a121ac8ac1b851 (diff)
gpu: nvgpu: Support multiple types of allocators
Support multiple types of allocation backends. Currently there is only one allocator implementation available: a buddy allocator. Buddy allocators have certain limitations though. For one the allocator requires metadata to be allocated from the kernel's system memory. This causes a given buddy allocation to potentially sleep on a kmalloc() call. This patch has been created so that a new backend can be created which will avoid any dynamic system memory management routines from being called. Bug 1781897 Change-Id: I98d6c8402c049942f13fee69c6901a166f177f65 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1172115 GVS: Gerrit_Virtual_Submit Reviewed-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/as_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/as_gk20a.c9
1 file changed, 5 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index 0571ca1f..8144ec6e 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -279,16 +279,17 @@ static int gk20a_as_ioctl_get_va_regions(
279 279
280 for (i = 0; i < write_entries; ++i) { 280 for (i = 0; i < write_entries; ++i) {
281 struct nvgpu_as_va_region region; 281 struct nvgpu_as_va_region region;
282 struct gk20a_allocator *vma = vm->fixed.init ? 282 struct gk20a_allocator *vma =
283 gk20a_alloc_initialized(&vm->fixed) ?
283 &vm->fixed : &vm->vma[i]; 284 &vm->fixed : &vm->vma[i];
284 285
285 memset(&region, 0, sizeof(struct nvgpu_as_va_region)); 286 memset(&region, 0, sizeof(struct nvgpu_as_va_region));
286 287
287 region.page_size = vm->gmmu_page_sizes[i]; 288 region.page_size = vm->gmmu_page_sizes[i];
288 region.offset = vma->base; 289 region.offset = gk20a_alloc_base(vma);
289 /* No __aeabi_uldivmod() on some platforms... */ 290 /* No __aeabi_uldivmod() on some platforms... */
290 region.pages = (vma->end - vma->start) >> 291 region.pages = (gk20a_alloc_end(vma) -
291 ilog2(region.page_size); 292 gk20a_alloc_base(vma)) >> ilog2(region.page_size);
292 293
293 if (copy_to_user(user_region_ptr + i, &region, sizeof(region))) 294 if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
294 return -EFAULT; 295 return -EFAULT;