path: root/drivers/gpu/nvgpu/vgpu
author    Aingara Paramakuru <aparamakuru@nvidia.com>  2015-02-04 19:18:45 -0500
committer Dan Willemsen <dwillemsen@nvidia.com>        2015-04-04 21:07:11 -0400
commit    c7a3903fd0df0c480f1883c3929bf82c74dc0f45 (patch)
tree      237ee050338fa0bd4d1f04fe77a56814730d841c /drivers/gpu/nvgpu/vgpu
parent    bc1b5fdd56fff2a64a78b4a190897e34f9f08845 (diff)
gpu: nvgpu: vgpu: fix AS split
The GVA was increased to 128GB but for vgpu, the split was not updated
to reflect the correct small and large page split (16GB for small
pages, rest for large pages).

Bug 1606860

Change-Id: Ieae056d6a6cfd2f2fc5066d33e1247d2a96a3616
Signed-off-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Reviewed-on: http://git-master/r/681340
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
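For reference, a minimal standalone sketch of the arithmetic behind the
new split. The 128GB GVA figure comes from the commit message; the 4KB
small-page and 128KB big-page sizes are illustrative assumptions, not
values read out of the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t va_limit = 128ULL << 30;	/* assumed 128GB GVA */
	uint64_t small_page = 4ULL << 10;	/* assumed 4KB small pages */
	uint64_t big_page = 128ULL << 10;	/* assumed 128KB big pages */

	/* First 16GB goes to small pages; the remainder to large pages. */
	uint64_t small_vma_size = 16ULL << 30;
	uint64_t large_vma_size = va_limit - small_vma_size;

	uint32_t num_small_pages = (uint32_t)(small_vma_size / small_page);
	uint32_t num_large_pages = (uint32_t)(large_vma_size / big_page);

	/* The big-page allocator starts at the 16GB boundary, expressed
	 * in big-page units, so the two ranges stay adjacent and
	 * non-overlapping. */
	uint32_t big_start = (uint32_t)(small_vma_size / big_page);

	printf("small VMA: %u pages starting at 0\n", num_small_pages);
	printf("large VMA: %u pages starting at %u\n",
	       num_large_pages, big_start);
	return 0;
}

Under these assumptions the fixed 16GB split replaces the old
half-and-half split of mm->channel.size, which no longer matched the
128GB address space.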
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 82d16bd1..79b95941 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -240,10 +240,11 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm;
-	u64 vma_size;
-	u32 num_pages, low_hole_pages;
+	u32 num_small_pages, num_large_pages, low_hole_pages;
+	u64 small_vma_size, large_vma_size;
 	char name[32];
 	int err, i;
+	u32 start;
 
 	/* note: keep the page sizes sorted lowest to highest here */
 	u32 gmmu_page_sizes[gmmu_nr_page_sizes] = {
@@ -278,36 +279,47 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	msg.handle = platform->virt_handle;
 	p->size = vm->va_limit;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret)
-		return -ENOMEM;
+	if (err || msg.ret) {
+		err = -ENOMEM;
+		goto clean_up;
+	}
 
 	vm->handle = p->handle;
 
-	/* low-half: alloc small pages */
-	/* high-half: alloc big pages */
-	vma_size = mm->channel.size >> 1;
+	/* First 16GB of the address space goes towards small pages. What ever
+	 * remains is allocated to large pages. */
+	small_vma_size = (u64)16 << 30;
+	large_vma_size = vm->va_limit - small_vma_size;
 
-	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-		 gmmu_page_sizes[gmmu_page_size_small]>>10);
-	num_pages = (u32)(vma_size >>
-		    ilog2(gmmu_page_sizes[gmmu_page_size_small]));
+	num_small_pages = (u32)(small_vma_size >>
+		    ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
 
 	/* num_pages above is without regard to the low-side hole. */
 	low_hole_pages = (vm->va_start >>
-			  ilog2(gmmu_page_sizes[gmmu_page_size_small]));
-
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_small], name,
-	       low_hole_pages,             /* start */
-	       num_pages - low_hole_pages);/* length */
+			  ilog2(vm->gmmu_page_sizes[gmmu_page_size_small]));
 
 	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
-		 gmmu_page_sizes[gmmu_page_size_big]>>10);
+		 gmmu_page_sizes[gmmu_page_size_small]>>10);
+	err = gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
+			name,
+			low_hole_pages, /*start*/
+			num_small_pages - low_hole_pages);/* length*/
+	if (err)
+		goto clean_up_share;
+
+	start = (u32)(small_vma_size >>
+		ilog2(vm->gmmu_page_sizes[gmmu_page_size_big]));
+	num_large_pages = (u32)(large_vma_size >>
+		ilog2(vm->gmmu_page_sizes[gmmu_page_size_big]));
 
-	num_pages = (u32)(vma_size >>
-		    ilog2(gmmu_page_sizes[gmmu_page_size_big]));
-	gk20a_allocator_init(&vm->vma[gmmu_page_size_big], name,
-			     num_pages, /* start */
-			     num_pages); /* length */
+	snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
+		 gmmu_page_sizes[gmmu_page_size_big]>>10);
+	err = gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
+			name,
+			start,			/* start */
+			num_large_pages);	/* length */
+	if (err)
+		goto clean_up_small_allocator;
 
 	vm->mapped_buffers = RB_ROOT;
 
@@ -318,6 +330,19 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	vm->enable_ctag = true;
 
 	return 0;
+
+clean_up_small_allocator:
+	gk20a_allocator_destroy(&vm->vma[gmmu_page_size_small]);
+clean_up_share:
+	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
+	msg.handle = platform->virt_handle;
+	p->handle = vm->handle;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	WARN_ON(err || msg.ret);
+clean_up:
+	kfree(vm);
+	as_share->vm = NULL;
+	return err;
 }
 
 static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
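The error paths added above follow the usual kernel goto-unwind
convention: each label releases exactly what was acquired before the
failing step, ending at clean_up, which frees the vm struct itself. A
minimal sketch of that pattern, using hypothetical init_a()/init_b()
helpers rather than the real allocator API:

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

/* Stand-ins for the two allocator initializations in the patch. */
static int init_a(struct ctx *c)
{
	c->a = malloc(16);
	return c->a ? 0 : -ENOMEM;
}

static int init_b(struct ctx *c)
{
	c->b = malloc(16);
	return c->b ? 0 : -ENOMEM;
}

static int setup(struct ctx *c)
{
	int err;

	err = init_a(c);		/* like the small-page VMA */
	if (err)
		goto clean_up;		/* nothing acquired yet */

	err = init_b(c);		/* like the big-page VMA */
	if (err)
		goto clean_up_a;	/* unwind only what succeeded */

	return 0;

clean_up_a:
	free(c->a);			/* mirrors gk20a_allocator_destroy() */
	c->a = NULL;
clean_up:
	return err;
}

int main(void)
{
	struct ctx c = { NULL, NULL };
	int err = setup(&c);

	free(c.b);			/* safe: free(NULL) is a no-op */
	free(c.a);
	return err ? 1 : 0;
}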