author     Richard Zhao <rizhao@nvidia.com>  2016-07-21 19:51:40 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2016-08-15 14:41:16 -0400
commit     e1438818b90c5b0d73aae800b12bd6b36aec5142 (patch)
tree       f0582cda23552526c3067e90f4cb74b461d50d73  /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent     33ff34887f560449828e79170a2a36a97496eeec (diff)
gpu: nvgpu: vgpu: add vgpu private data and helper functions
Move vgpu private data to a dedicated structure and allocate it at
probe time. Also add a virt_handle helper function (vgpu_get_handle()),
which is used everywhere.

JIRA VFND-2103

Change-Id: I125911420be72ca9be948125d8357fa85d1d3afd
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: http://git-master/r/1185206
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vladislav Buzov <vbuzov@nvidia.com>
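For context, the shape of the new helper implied by the hunks below is roughly the following sketch. Only vgpu_get_handle() itself is confirmed by this diff; the structure name vgpu_priv_data, the accessor vgpu_get_priv_data(), and the platform field used to store the pointer are illustrative assumptions, not the actual patch contents.

/* Illustrative sketch only -- assumes the driver's gk20a/vgpu headers. */
struct vgpu_priv_data {
        u64 virt_handle;        /* handle used for RPCs to the vgpu server */
        /* ... other vgpu-private fields, allocated at probe time ... */
};

static inline struct vgpu_priv_data *vgpu_get_priv_data(struct gk20a *g)
{
        /* Assumption: the private data allocated at probe time is reachable
         * from the platform data; the field name here is hypothetical. */
        return (struct vgpu_priv_data *)gk20a_get_platform(g->dev)->vgpu_priv;
}

static inline u64 vgpu_get_handle(struct gk20a *g)
{
        return vgpu_get_priv_data(g)->virt_handle;
}

Call sites then replace platform->virt_handle with vgpu_get_handle(g) (or vgpu_get_handle(ch->g) where only a channel is at hand), as the hunks below show.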
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c  36
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 6b741cd4..b256598f 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Memory Management
  *
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,6 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
         int err = 0;
         struct device *d = dev_from_vm(vm);
         struct gk20a *g = gk20a_from_vm(vm);
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
@@ -114,7 +113,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
                 prot = TEGRA_VGPU_MAP_PROT_NONE;
 
         msg.cmd = TEGRA_VGPU_CMD_AS_MAP;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = vm->handle;
         p->addr = addr;
         p->gpu_va = map_offset;
@@ -164,7 +163,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
                                struct vm_gk20a_mapping_batch *batch)
 {
         struct gk20a *g = gk20a_from_vm(vm);
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
         int err;
@@ -181,7 +179,7 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
         }
 
         msg.cmd = TEGRA_VGPU_CMD_AS_UNMAP;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = vm->handle;
         p->gpu_va = vaddr;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -195,7 +193,6 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 {
         struct gk20a *g = vm->mm->g;
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct mapped_buffer_node *mapped_buffer;
         struct vm_reserved_va_node *va_node, *va_node_tmp;
         struct tegra_vgpu_cmd_msg msg;
@@ -225,7 +222,7 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
         }
 
         msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = vm->handle;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);
@@ -244,7 +241,6 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 
 u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct dma_iommu_mapping *mapping =
                         to_dma_iommu_mapping(dev_from_gk20a(g));
         u64 addr = g->ops.mm.get_iova_addr(g, (*sgt)->sgl, 0);
@@ -253,7 +249,7 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
         int err;
 
         msg.cmd = TEGRA_VGPU_CMD_MAP_BAR1;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->addr = addr;
         p->size = size;
         p->iova = mapping ? 1 : 0;
@@ -320,7 +316,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
         vm->va_limit = mm->channel.user_size + mm->channel.kernel_size;
 
         msg.cmd = TEGRA_VGPU_CMD_AS_ALLOC_SHARE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->size = vm->va_limit;
         p->big_page_size = vm->big_page_size;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -434,7 +430,7 @@ clean_up_small_allocator:
         gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_share:
         msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = vm->handle;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);
@@ -448,7 +444,6 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
                                struct channel_gk20a *ch)
 {
         struct vm_gk20a *vm = as_share->vm;
-        struct gk20a_platform *platform = gk20a_get_platform(ch->g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_as_bind_share_params *p = &msg.params.as_bind_share;
         int err;
@@ -457,7 +452,7 @@ static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 
         ch->vm = vm;
         msg.cmd = TEGRA_VGPU_CMD_AS_BIND_SHARE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(ch->g);
         p->as_handle = vm->handle;
         p->chan_handle = ch->virt_ctx;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
@@ -485,26 +480,23 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 
 static int vgpu_mm_fb_flush(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
         gk20a_dbg_fn("");
 
-        vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_FB_FLUSH);
+        vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
         return 0;
 }
 
 static void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
         gk20a_dbg_fn("");
 
-        vgpu_cache_maint(platform->virt_handle, TEGRA_VGPU_L2_MAINT_INV);
+        vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
 
 static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         u8 op;
 
         gk20a_dbg_fn("");
@@ -514,13 +506,12 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
         else
                 op = TEGRA_VGPU_L2_MAINT_FLUSH;
 
-        vgpu_cache_maint(platform->virt_handle, op);
+        vgpu_cache_maint(vgpu_get_handle(g), op);
 }
 
 static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 {
         struct gk20a *g = gk20a_from_vm(vm);
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_as_invalidate_params *p = &msg.params.as_invalidate;
         int err;
@@ -528,7 +519,7 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_AS_INVALIDATE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->handle = vm->handle;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);
@@ -536,7 +527,6 @@ static void vgpu_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 {
-        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
         int err;
@@ -544,7 +534,7 @@ static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
         gk20a_dbg_fn("");
 
         msg.cmd = TEGRA_VGPU_CMD_SET_MMU_DEBUG_MODE;
-        msg.handle = platform->virt_handle;
+        msg.handle = vgpu_get_handle(g);
         p->enable = (u32)enable;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);