path: root/drivers/gpu/nvgpu/common/as.c
author    Alex Waterman <alexw@nvidia.com>    2017-05-09 19:41:18 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-06-06 20:09:11 -0400
commit    c2b63150cd947557b8d17637258b988459b8e0ec (patch)
tree      5b15911b0b4799538ca98c9b6c1a31c0cbfe4a99 /drivers/gpu/nvgpu/common/as.c
parent    6bd7d22c0f248d0d29ea44b06798b247d0d2753a (diff)
gpu: nvgpu: Unify vm_init for vGPU and regular GPU
Unify the initialization routines for the vGPU and regular GPU paths.
This helps avoid any further code divergence. This also assumes that the
code running on the regular GPU essentially works for the vGPU. The only
addition is that the regular GPU path calls an API in the vGPU code that
sends the necessary RM server message.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I37af1993fd8b50f666ae27524d382cce49cf28f7
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1480226
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
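To make the unification concrete, here is a standalone sketch (not part of this
commit, which only touches as.c) of the shape the message describes: both GPU
types run the same init path, and only a virtualized GPU takes the extra step of
notifying the RM server. All names in the sketch (struct gpu, is_virtual,
vgpu_notify_rm_server(), init_vm()) are stand-ins, not the actual nvgpu API.

/* Standalone sketch of the unified init flow described in the commit
 * message above. Compiles on its own; all types and names are stand-ins
 * rather than real nvgpu symbols. */
#include <stdbool.h>
#include <stdio.h>

struct gpu {
        bool is_virtual;        /* assumption: flag distinguishing vGPU from native GPU */
};

struct vm {
        unsigned int big_page_size;
};

/* Setup shared by the native GPU and vGPU paths (avoids code divergence). */
static int init_vm_common(struct vm *vm, unsigned int big_page_size)
{
        vm->big_page_size = big_page_size;
        return 0;
}

/* vGPU-only step: stands in for the call that sends the RM server message. */
static int vgpu_notify_rm_server(const struct vm *vm)
{
        printf("RM server told about new VM (big page size %u)\n",
               vm->big_page_size);
        return 0;
}

/* Single entry point used by both paths; only the notification differs. */
static int init_vm(struct gpu *g, struct vm *vm, unsigned int big_page_size)
{
        int err = init_vm_common(vm, big_page_size);

        if (!err && g->is_virtual)
                err = vgpu_notify_rm_server(vm);
        return err;
}

int main(void)
{
        struct gpu g = { .is_virtual = true };
        struct vm vm = { 0 };

        return init_vm(&g, &vm, 64 * 1024);
}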
Diffstat (limited to 'drivers/gpu/nvgpu/common/as.c')
-rw-r--r--  drivers/gpu/nvgpu/common/as.c  63
1 file changed, 62 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 3182642a..481fb807 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -16,8 +16,10 @@
 #include <trace/events/gk20a.h>
 
 #include <nvgpu/kmem.h>
+#include <nvgpu/vm.h>
 
 #include "gk20a/gk20a.h"
+#include "gk20a/platform_gk20a.h"
 
 /* dumb allocator... */
 static int generate_as_share_id(struct gk20a_as *as)
@@ -32,6 +34,51 @@ static void release_as_share_id(struct gk20a_as *as, int id)
        return;
 }
 
+/* address space interfaces for the gk20a module */
+static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
+                               u32 big_page_size, u32 flags)
+{
+       struct gk20a_as *as = as_share->as;
+       struct gk20a *g = gk20a_from_as(as);
+       struct mm_gk20a *mm = &g->mm;
+       struct vm_gk20a *vm;
+       char name[32];
+       int err;
+       const bool userspace_managed =
+               (flags & NVGPU_GPU_IOCTL_ALLOC_AS_FLAGS_USERSPACE_MANAGED) != 0;
+
+       gk20a_dbg_fn("");
+
+       if (big_page_size == 0) {
+               big_page_size =
+                       gk20a_get_platform(g->dev)->default_big_page_size;
+       } else {
+               if (!is_power_of_2(big_page_size))
+                       return -EINVAL;
+
+               if (!(big_page_size & g->gpu_characteristics.available_big_page_sizes))
+                       return -EINVAL;
+       }
+
+       vm = nvgpu_kzalloc(g, sizeof(*vm));
+       if (!vm)
+               return -ENOMEM;
+
+       as_share->vm = vm;
+       vm->as_share = as_share;
+       vm->enable_ctag = true;
+
+       snprintf(name, sizeof(name), "as_%d", as_share->id);
+
+       err = nvgpu_init_vm(mm, vm, big_page_size,
+                           big_page_size << 10,
+                           mm->channel.kernel_size,
+                           mm->channel.user_size + mm->channel.kernel_size,
+                           !mm->disable_bigpage, userspace_managed, name);
+
+       return err;
+}
+
 int gk20a_as_alloc_share(struct gk20a *g,
                         u32 big_page_size, u32 flags,
                         struct gk20a_as_share **out)
@@ -56,7 +103,7 @@ int gk20a_as_alloc_share(struct gk20a *g,
        err = gk20a_busy(g);
        if (err)
                goto failed;
-       err = g->ops.mm.vm_alloc_share(as_share, big_page_size, flags);
+       err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
        gk20a_idle(g);
 
        if (err)
@@ -70,6 +117,20 @@ failed:
        return err;
 }
 
+int gk20a_vm_release_share(struct gk20a_as_share *as_share)
+{
+       struct vm_gk20a *vm = as_share->vm;
+
+       gk20a_dbg_fn("");
+
+       vm->as_share = NULL;
+       as_share->vm = NULL;
+
+       nvgpu_vm_put(vm);
+
+       return 0;
+}
+
 /*
  * channels and the device nodes call this to release.
  * once the ref_cnt hits zero the share is deleted.
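The trailing comment refers to the release path that follows it (not shown in
this diff): channels and the device node each drop a reference on the share,
and the final drop deletes the share, which in turn uses the new
gk20a_vm_release_share() to detach and put the VM. The snippet below is a
standalone sketch of that last-reference pattern; the types and field names
are stand-ins, not the real nvgpu structures.

/* Standalone sketch of the ref-counted release the comment above
 * describes: every holder drops a reference, and the final drop frees
 * the share and releases its VM. Stand-in types only; not nvgpu code. */
#include <stdio.h>
#include <stdlib.h>

struct as_share {
        int ref_cnt;            /* one reference per channel plus one for the fd */
};

/* Stands in for gk20a_vm_release_share(): detach and drop the VM. */
static void release_vm(struct as_share *share)
{
        printf("VM released for share %p\n", (void *)share);
}

/* Channels and the device node call this; the last drop deletes the share. */
static void as_share_put(struct as_share *share)
{
        if (--share->ref_cnt == 0) {
                release_vm(share);
                free(share);
        }
}

int main(void)
{
        struct as_share *share = calloc(1, sizeof(*share));

        if (!share)
                return 1;
        share->ref_cnt = 2;     /* e.g. one channel plus the device node */

        as_share_put(share);    /* channel closes: share stays alive */
        as_share_put(share);    /* device node closes: share is deleted */
        return 0;
}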