author		Terje Bergstrom <tbergstrom@nvidia.com>		2017-04-06 16:10:30 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-10 15:24:27 -0400
commit		5405070ecd27ce462babc1dff231fec5cd8bd6b7 (patch)
tree		903461959633aec359b5bb3f4f660c5dcb6bdbcf /drivers/gpu/nvgpu/vgpu/gp10b
parent		3a1104c3699b05201abf48ed9283bb8ccbe42732 (diff)
gpu: nvgpu: vgpu: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is not portable across operating systems. The new nvgpu_err() and nvgpu_warn() macros take a struct gk20a pointer. Convert code to use the more portable macros.

JIRA NVGPU-16

Change-Id: I071e8c50959bfa81730ca964d912bc69f9c7e6ad
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457355
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
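For illustration, the conversion applied throughout this patch follows the pattern below; both calls are taken from the hunks that follow, no new API is introduced here:

	/* before: error macro needs a struct device *, obtained via dev_from_gk20a() */
	gk20a_err(dev_from_gk20a(g), "set_ctxsw_preemption_mode failed");

	/* after: portable macro takes the struct gk20a * directly */
	nvgpu_err(g, "set_ctxsw_preemption_mode failed");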
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c	7
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	7
2 files changed, 6 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 1a5811fe..cc9c46bf 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -88,7 +88,7 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
 			class, graphics_preempt_mode, compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
 				"set_ctxsw_preemption_mode failed");
 			goto fail;
 		}
@@ -254,7 +254,7 @@ static int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	return err;
 
 fail:
-	gk20a_err(dev_from_gk20a(g), "%s failed %d", __func__, err);
+	nvgpu_err(g, "%s failed %d", __func__, err);
 	return err;
 }
 
@@ -297,8 +297,7 @@ static int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 					graphics_preempt_mode,
 					compute_preempt_mode);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 			return err;
 		}
 	} else {
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 2da18fb8..cfda867c 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -54,7 +54,6 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		enum nvgpu_aperture aperture)
 {
 	int err = 0;
-	struct device *d = dev_from_vm(vm);
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_map_ex_params *p = &msg.params.as_map_ex;
@@ -82,7 +81,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	if (!map_offset) {
 		map_offset = gk20a_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
-			gk20a_err(d, "failed to allocate va space");
+			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -140,7 +139,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 		    vm->gmmu_page_sizes[gmmu_page_size_big]) {
 			pgsz_idx = gmmu_page_size_big;
 		} else {
-			gk20a_err(d, "invalid kernel page size %d\n",
+			nvgpu_err(g, "invalid kernel page size %d\n",
 				page_size);
 			goto fail;
 		}
@@ -171,7 +170,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
 	if (handle)
 		tegra_gr_comm_oob_put_ptr(handle);
-	gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
+	nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
 	return 0;
 }
 