author    Konsta Holtta <kholtta@nvidia.com>  2014-09-29 06:16:15 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>  2015-03-18 15:11:33 -0400
commit    719923ad9fa7c6b2ca68a25d1ce4518aab844bc2 (patch)
tree      bcb3dfbbd2968bf4b863f8990c11f05bc61ed6df
parent    83bf2aa83d922080884a9fe547b656e24495e16e (diff)
gpu: nvgpu: rename gpu ioctls and structs to nvgpu
To help remove the nvhost dependency from nvgpu, rename ioctl defines and
structures used by nvgpu such that nvhost is replaced by nvgpu. Duplicate
some structures as needed. Update header guards and such accordingly.

Change-Id: Ifc3a867713072bae70256502735583ab38381877
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/542620
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
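On the uapi side the rename is mechanical: every nvhost-prefixed define and struct gains an nvgpu-prefixed counterpart with the same binary layout, so userspace only needs a recompile against the new names. As a rough sketch of what one renamed entry might look like (the uapi header itself is not part of the hunks below, so the magic value, command number, and field layout here are assumptions inferred from how the handlers use them):

    /* Illustrative sketch only -- not taken from this commit's hunks. */
    #include <linux/ioctl.h>
    #include <linux/types.h>

    #define NVGPU_AS_IOCTL_MAGIC 'A'    /* assumed; was NVHOST_AS_IOCTL_MAGIC */

    /* was struct nvhost_as_bind_channel_args; layout deliberately unchanged */
    struct nvgpu_as_bind_channel_args {
            __u32 channel_fd;
    };

    #define NVGPU_AS_IOCTL_BIND_CHANNEL \
            _IOWR(NVGPU_AS_IOCTL_MAGIC, 1, struct nvgpu_as_bind_channel_args)

Because _IOWR() folds the argument size into the command value, keeping the struct layout identical means a renamed ioctl stays numerically compatible with its old counterpart whenever the magic and command number are unchanged.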
 drivers/gpu/nvgpu/gk20a/as_gk20a.c             |  60
 drivers/gpu/nvgpu/gk20a/as_gk20a.h             |  10
 drivers/gpu/nvgpu/gk20a/cde_gk20a.c            |  50
 drivers/gpu/nvgpu/gk20a/cde_gk20a.h            |   8
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c        | 166
 drivers/gpu/nvgpu/gk20a/channel_gk20a.h        |  22
 drivers/gpu/nvgpu/gk20a/clk_gk20a.h            |  15
 drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c           |  64
 drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h           |  15
 drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c        |  90
 drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h        |   8
 drivers/gpu/nvgpu/gk20a/debug_gk20a.c          |   5
 drivers/gpu/nvgpu/gk20a/debug_gk20a.h          |   2
 drivers/gpu/nvgpu/gk20a/fb_gk20a.h             |   6
 drivers/gpu/nvgpu/gk20a/fence_gk20a.c          |   5
 drivers/gpu/nvgpu/gk20a/fifo_gk20a.c           |  21
 drivers/gpu/nvgpu/gk20a/gk20a.c                |  23
 drivers/gpu/nvgpu/gk20a/gk20a.h                |  31
 drivers/gpu/nvgpu/gk20a/gk20a_allocator.h      |   8
 drivers/gpu/nvgpu/gk20a/gk20a_scale.h          |   1
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c             |  33
 drivers/gpu/nvgpu/gk20a/gr_gk20a.h             |  18
 drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h         |   6
 drivers/gpu/nvgpu/gk20a/ltc_gk20a.h            |   4
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c             |  37
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h             |  33
 drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c |   1
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c            |   8
 drivers/gpu/nvgpu/gk20a/regops_gk20a.c         |  15
 drivers/gpu/nvgpu/gk20a/regops_gk20a.h         |  11
 drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h      |  14
 drivers/gpu/nvgpu/gk20a/sync_gk20a.c           |   2
 drivers/gpu/nvgpu/gk20a/therm_gk20a.h          |  17
 drivers/gpu/nvgpu/gm20b/acr_gm20b.c            |   5
 drivers/gpu/nvgpu/gm20b/regops_gm20b.c         |   1
 35 files changed, 385 insertions(+), 430 deletions(-)
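For orientation before the per-file hunks, a hypothetical userspace caller after the rename could look like the sketch below. Only the NVGPU_* identifiers come from this patch; the header path, device-node name, and the example fd are assumptions (this commit touches neither):

    /* Hypothetical usage sketch; header path and device node are assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/nvgpu.h>    /* assumed home of the renamed uapi definitions */

    int main(void)
    {
            /* open an address-space device and bind an already-open channel to it */
            int as_fd = open("/dev/nvhost-as-gpu", O_RDWR); /* node name: assumption */
            struct nvgpu_as_bind_channel_args args = { .channel_fd = 3 /* example */ };

            if (as_fd < 0 || ioctl(as_fd, NVGPU_AS_IOCTL_BIND_CHANNEL, &args) < 0) {
                    perror("NVGPU_AS_IOCTL_BIND_CHANNEL");
                    return 1;
            }
            return 0;
    }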
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index a2741fe8..c13d055e 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -1,6 +1,4 @@
 /*
- * drivers/video/tegra/host/gk20a/as_gk20a.c
- *
  * GK20A Address Spaces
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -22,6 +20,8 @@
 
 #include <trace/events/gk20a.h>
 
+#include <linux/nvhost_as_ioctl.h>
+
 #include "gk20a.h"
 
 /* dumb allocator... */
@@ -93,7 +93,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 
 static int gk20a_as_ioctl_bind_channel(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_bind_channel_args *args)
+                struct nvgpu_as_bind_channel_args *args)
 {
         int err = 0;
         struct channel_gk20a *ch;
@@ -118,7 +118,7 @@ static int gk20a_as_ioctl_bind_channel(
 
 static int gk20a_as_ioctl_alloc_space(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_alloc_space_args *args)
+                struct nvgpu_as_alloc_space_args *args)
 {
         gk20a_dbg_fn("");
         return gk20a_vm_alloc_space(as_share, args);
@@ -126,7 +126,7 @@ static int gk20a_as_ioctl_alloc_space(
 
 static int gk20a_as_ioctl_free_space(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_free_space_args *args)
+                struct nvgpu_as_free_space_args *args)
 {
         gk20a_dbg_fn("");
         return gk20a_vm_free_space(as_share, args);
@@ -134,12 +134,12 @@ static int gk20a_as_ioctl_free_space(
 
 static int gk20a_as_ioctl_map_buffer_ex(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_map_buffer_ex_args *args)
+                struct nvgpu_as_map_buffer_ex_args *args)
 {
         gk20a_dbg_fn("");
 
         return gk20a_vm_map_buffer(as_share, args->dmabuf_fd,
-                                   &args->as_offset, args->flags,
+                                   &args->offset, args->flags,
                                    args->kind,
                                    args->buffer_offset,
                                    args->mapping_size
@@ -148,10 +148,10 @@ static int gk20a_as_ioctl_map_buffer_ex(
 
 static int gk20a_as_ioctl_map_buffer(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_map_buffer_args *args)
+                struct nvgpu_as_map_buffer_args *args)
 {
         gk20a_dbg_fn("");
-        return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
+        return gk20a_vm_map_buffer(as_share, args->dmabuf_fd,
                                    &args->o_a.offset,
                                    args->flags, NV_KIND_DEFAULT,
                                    0, 0);
@@ -160,7 +160,7 @@ static int gk20a_as_ioctl_map_buffer(
 
 static int gk20a_as_ioctl_unmap_buffer(
                 struct gk20a_as_share *as_share,
-                struct nvhost_as_unmap_buffer_args *args)
+                struct nvgpu_as_unmap_buffer_args *args)
 {
         gk20a_dbg_fn("");
         return gk20a_vm_unmap_buffer(as_share, args->offset);
@@ -214,14 +214,14 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         struct gk20a_as_share *as_share = filp->private_data;
         struct gk20a *g = gk20a_from_as(as_share->as);
 
-        u8 buf[NVHOST_AS_IOCTL_MAX_ARG_SIZE];
+        u8 buf[NVGPU_AS_IOCTL_MAX_ARG_SIZE];
 
-        if ((_IOC_TYPE(cmd) != NVHOST_AS_IOCTL_MAGIC) ||
+        if ((_IOC_TYPE(cmd) != NVGPU_AS_IOCTL_MAGIC) ||
             (_IOC_NR(cmd) == 0) ||
-            (_IOC_NR(cmd) > NVHOST_AS_IOCTL_LAST))
+            (_IOC_NR(cmd) > NVGPU_AS_IOCTL_LAST))
                 return -EINVAL;
 
-        BUG_ON(_IOC_SIZE(cmd) > NVHOST_AS_IOCTL_MAX_ARG_SIZE);
+        BUG_ON(_IOC_SIZE(cmd) > NVGPU_AS_IOCTL_MAX_ARG_SIZE);
 
         if (_IOC_DIR(cmd) & _IOC_WRITE) {
                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
@@ -233,17 +233,17 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 return err;
 
         switch (cmd) {
-        case NVHOST_AS_IOCTL_BIND_CHANNEL:
+        case NVGPU_AS_IOCTL_BIND_CHANNEL:
                 trace_gk20a_as_ioctl_bind_channel(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_bind_channel(as_share,
-                                (struct nvhost_as_bind_channel_args *)buf);
+                                (struct nvgpu_as_bind_channel_args *)buf);
 
                 break;
-        case NVHOST32_AS_IOCTL_ALLOC_SPACE:
+        case NVGPU32_AS_IOCTL_ALLOC_SPACE:
         {
-                struct nvhost32_as_alloc_space_args *args32 =
-                        (struct nvhost32_as_alloc_space_args *)buf;
-                struct nvhost_as_alloc_space_args args;
+                struct nvgpu32_as_alloc_space_args *args32 =
+                        (struct nvgpu32_as_alloc_space_args *)buf;
+                struct nvgpu_as_alloc_space_args args;
 
                 args.pages = args32->pages;
                 args.page_size = args32->page_size;
@@ -254,30 +254,30 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 args32->o_a.offset = args.o_a.offset;
                 break;
         }
-        case NVHOST_AS_IOCTL_ALLOC_SPACE:
+        case NVGPU_AS_IOCTL_ALLOC_SPACE:
                 trace_gk20a_as_ioctl_alloc_space(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_alloc_space(as_share,
-                                (struct nvhost_as_alloc_space_args *)buf);
+                                (struct nvgpu_as_alloc_space_args *)buf);
                 break;
-        case NVHOST_AS_IOCTL_FREE_SPACE:
+        case NVGPU_AS_IOCTL_FREE_SPACE:
                 trace_gk20a_as_ioctl_free_space(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_free_space(as_share,
-                                (struct nvhost_as_free_space_args *)buf);
+                                (struct nvgpu_as_free_space_args *)buf);
                 break;
-        case NVHOST_AS_IOCTL_MAP_BUFFER:
+        case NVGPU_AS_IOCTL_MAP_BUFFER:
                 trace_gk20a_as_ioctl_map_buffer(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_map_buffer(as_share,
-                                (struct nvhost_as_map_buffer_args *)buf);
+                                (struct nvgpu_as_map_buffer_args *)buf);
                 break;
-        case NVHOST_AS_IOCTL_MAP_BUFFER_EX:
+        case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
                 trace_gk20a_as_ioctl_map_buffer(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_map_buffer_ex(as_share,
-                                (struct nvhost_as_map_buffer_ex_args *)buf);
+                                (struct nvgpu_as_map_buffer_ex_args *)buf);
                 break;
-        case NVHOST_AS_IOCTL_UNMAP_BUFFER:
+        case NVGPU_AS_IOCTL_UNMAP_BUFFER:
                 trace_gk20a_as_ioctl_unmap_buffer(dev_name(dev_from_gk20a(g)));
                 err = gk20a_as_ioctl_unmap_buffer(as_share,
-                                (struct nvhost_as_unmap_buffer_args *)buf);
+                                (struct nvgpu_as_unmap_buffer_args *)buf);
                 break;
         default:
                 dev_dbg(dev_from_gk20a(g), "unrecognized as ioctl: 0x%x", cmd);
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.h b/drivers/gpu/nvgpu/gk20a/as_gk20a.h
index be0e9707..457678ce 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.h
@@ -1,7 +1,5 @@
 /*
- * drivers/video/tegra/host/gk20a/as_gk20a.h
- *
- * GK20A Address Space
+ * GK20A Address Spaces
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
  *
@@ -14,15 +12,13 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  */
-#ifndef __GK20A_AS_H
-#define __GK20A_AS_H
+#ifndef AS_GK20A_H
+#define AS_GK20A_H
 
 #include <linux/atomic.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
 
-#include <linux/nvhost_as_ioctl.h>
-
 struct gk20a_as;
 struct gk20a_as_share;
 struct vm_gk20a;
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index c2e2cc98..4c33ea8d 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -16,7 +16,6 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/nvhost.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/fs.h>
@@ -30,6 +29,7 @@
30#include "fence_gk20a.h" 29#include "fence_gk20a.h"
31#include "gr_gk20a.h" 30#include "gr_gk20a.h"
32#include "debug_gk20a.h" 31#include "debug_gk20a.h"
32#include "semaphore_gk20a.h"
33 33
34#include "hw_ccsr_gk20a.h" 34#include "hw_ccsr_gk20a.h"
35#include "hw_pbdma_gk20a.h" 35#include "hw_pbdma_gk20a.h"
@@ -65,7 +65,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 
         for (i = 0; i < cde_ctx->num_obj_ids; i++)
                 gk20a_free_obj_ctx(cde_ctx->ch,
-                                   &(struct nvhost_free_obj_ctx_args)
+                                   &(struct nvgpu_free_obj_ctx_args)
                                    { cde_ctx->obj_ids[i] });
 
         kfree(cde_ctx->init_cmd);
@@ -400,7 +400,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
                                           const struct firmware *img,
                                           u32 required_class)
 {
-        struct nvhost_alloc_obj_ctx_args alloc_obj_ctx;
+        struct nvgpu_alloc_obj_ctx_args alloc_obj_ctx;
         int err;
 
         if (cde_ctx->num_obj_ids >= MAX_CDE_OBJ_IDS) {
@@ -430,7 +430,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
                                   struct gk20a_cde_cmd_elem *cmd_elem,
                                   u32 num_elems)
 {
-        struct nvhost_gpfifo **gpfifo, *gpfifo_elem;
+        struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
         u32 *num_entries;
         int i;
 
@@ -448,7 +448,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
         }
 
         /* allocate gpfifo entries to be pushed */
-        *gpfifo = kzalloc(sizeof(struct nvhost_gpfifo) * num_elems,
+        *gpfifo = kzalloc(sizeof(struct nvgpu_gpfifo) * num_elems,
                           GFP_KERNEL);
         if (!*gpfifo) {
                 gk20a_warn(&cde_ctx->pdev->dev, "cde: could not allocate memory for gpfifo entries");
@@ -574,10 +574,10 @@ deinit_image:
 }
 
 static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
-                                    u32 op, struct nvhost_fence *fence,
+                                    u32 op, struct nvgpu_fence *fence,
                                     u32 flags, struct gk20a_fence **fence_out)
 {
-        struct nvhost_gpfifo *gpfifo = NULL;
+        struct nvgpu_gpfifo *gpfifo = NULL;
         int num_entries = 0;
 
         /* check command type */
@@ -604,7 +604,7 @@ static int gk20a_cde_execute_buffer(struct gk20a_cde_ctx *cde_ctx,
 int gk20a_cde_convert(struct gk20a *g, struct dma_buf *src,
                       struct dma_buf *dst,
                       s32 dst_kind, u64 dst_byte_offset,
-                      u32 dst_size, struct nvhost_fence *fence,
+                      u32 dst_size, struct nvgpu_fence *fence,
                       u32 __flags, struct gk20a_cde_param *params,
                       int num_params, struct gk20a_fence **fence_out)
 {
@@ -637,7 +637,7 @@ int gk20a_cde_convert(struct gk20a *g, struct dma_buf *src,
         /* map the destination buffer */
         get_dma_buf(dst); /* a ref for gk20a_vm_map */
         dst_vaddr = gk20a_vm_map(g->cde_app.vm, dst, 0,
-                                 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                                 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                                  dst_kind, NULL, true,
                                  gk20a_mem_flag_none,
                                  0, 0);
@@ -655,7 +655,7 @@ int gk20a_cde_convert(struct gk20a *g, struct dma_buf *src,
         /* map the source buffer to prevent premature release */
         get_dma_buf(src); /* a ref for gk20a_vm_map */
         src_vaddr = gk20a_vm_map(g->cde_app.vm, src, 0,
-                                 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                                 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                                  dst_kind, NULL, true,
                                  gk20a_mem_flag_none,
                                  0, 0);
@@ -736,7 +736,7 @@ int gk20a_cde_convert(struct gk20a *g, struct dma_buf *src,
 
         /* take always the postfence as it is needed for protecting the
          * cde context */
-        flags = __flags | NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET;
+        flags = __flags | NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET;
 
         /* execute the conversion buffer */
         err = gk20a_cde_execute_buffer(cde_ctx, TYPE_BUF_COMMAND_CONVERT,
@@ -788,7 +788,7 @@ int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 
         /* allocate gpfifo (1024 should be more than enough) */
         err = gk20a_alloc_channel_gpfifo(ch,
-                        &(struct nvhost_alloc_gpfifo_args){1024, 0});
+                        &(struct nvgpu_alloc_gpfifo_args){1024, 0});
         if (err) {
                 gk20a_warn(&cde_ctx->pdev->dev, "cde: unable to allocate gpfifo");
                 goto err_alloc_gpfifo;
@@ -797,7 +797,7 @@ int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
         /* map backing store to gpu virtual space */
         vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.sgt,
                                g->gr.compbit_store.size,
-                               NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                               NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                                gk20a_mem_flag_read_only);
 
         if (!vaddr) {
@@ -986,7 +986,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
         struct gk20a *g, struct dma_buf *dmabuf, u32 consumer,
         u64 offset, u64 compbits_offset,
         u32 width, u32 height, u32 block_height_log2,
-        u32 submit_flags, struct nvhost_fence *fence_in,
+        u32 submit_flags, struct nvgpu_fence *fence_in,
         struct gk20a_fence **fence_out)
 {
         struct gk20a_cde_param params[NUM_CDE_LAUNCH_PATCHES];
@@ -994,7 +994,7 @@ static int gk20a_buffer_convert_gpu_to_cde(
         int err = 0;
 
         /* Compute per launch parameters */
-        const bool transpose = (consumer == NVHOST_GPU_COMPBITS_CDEV);
+        const bool transpose = (consumer == NVGPU_GPU_COMPBITS_CDEV);
         const int transposed_width = transpose ? height : width;
         const int transposed_height = transpose ? width : height;
         const int xtiles = (transposed_width + 7) >> 3;
@@ -1069,7 +1069,7 @@ int gk20a_prepare_compressible_read(
         struct gk20a *g, u32 buffer_fd, u32 request, u64 offset,
         u64 compbits_hoffset, u64 compbits_voffset,
         u32 width, u32 height, u32 block_height_log2,
-        u32 submit_flags, struct nvhost_fence *fence,
+        u32 submit_flags, struct nvgpu_fence *fence,
         u32 *valid_compbits, u32 *zbc_color,
         struct gk20a_fence **fence_out)
 {
@@ -1092,7 +1092,7 @@ int gk20a_prepare_compressible_read(
 
         mutex_lock(&state->lock);
 
-        if (state->valid_compbits && request == NVHOST_GPU_COMPBITS_NONE) {
+        if (state->valid_compbits && request == NVGPU_GPU_COMPBITS_NONE) {
 
                 gk20a_fence_put(state->fence);
                 state->fence = NULL;
@@ -1102,11 +1102,11 @@ int gk20a_prepare_compressible_read(
                 goto out;
         } else if (missing_bits) {
                 struct gk20a_fence *new_fence = NULL;
-                if ((state->valid_compbits & NVHOST_GPU_COMPBITS_GPU) &&
-                    (missing_bits & NVHOST_GPU_COMPBITS_CDEH)) {
+                if ((state->valid_compbits & NVGPU_GPU_COMPBITS_GPU) &&
+                    (missing_bits & NVGPU_GPU_COMPBITS_CDEH)) {
                         err = gk20a_buffer_convert_gpu_to_cde(
                                 g, dmabuf,
-                                NVHOST_GPU_COMPBITS_CDEH,
+                                NVGPU_GPU_COMPBITS_CDEH,
                                 offset, compbits_hoffset,
                                 width, height, block_height_log2,
                                 submit_flags, fence,
@@ -1117,13 +1117,13 @@ int gk20a_prepare_compressible_read(
                         /* CDEH bits generated, update state & fence */
                         gk20a_fence_put(state->fence);
                         state->fence = new_fence;
-                        state->valid_compbits |= NVHOST_GPU_COMPBITS_CDEH;
+                        state->valid_compbits |= NVGPU_GPU_COMPBITS_CDEH;
                 }
-                if ((state->valid_compbits & NVHOST_GPU_COMPBITS_GPU) &&
-                    (missing_bits & NVHOST_GPU_COMPBITS_CDEV)) {
+                if ((state->valid_compbits & NVGPU_GPU_COMPBITS_GPU) &&
+                    (missing_bits & NVGPU_GPU_COMPBITS_CDEV)) {
                         err = gk20a_buffer_convert_gpu_to_cde(
                                 g, dmabuf,
-                                NVHOST_GPU_COMPBITS_CDEV,
+                                NVGPU_GPU_COMPBITS_CDEV,
                                 offset, compbits_voffset,
                                 width, height, block_height_log2,
                                 submit_flags, fence,
@@ -1134,7 +1134,7 @@ int gk20a_prepare_compressible_read(
                         /* CDEH bits generated, update state & fence */
                         gk20a_fence_put(state->fence);
                         state->fence = new_fence;
-                        state->valid_compbits |= NVHOST_GPU_COMPBITS_CDEV;
+                        state->valid_compbits |= NVGPU_GPU_COMPBITS_CDEV;
                 }
         }
 
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
index 92757271..c427d4db 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
@@ -233,10 +233,10 @@ struct gk20a_cde_ctx {
 
         u64 backing_store_vaddr;
 
-        struct nvhost_gpfifo *init_cmd;
+        struct nvgpu_gpfifo *init_cmd;
         int init_cmd_num_entries;
 
-        struct nvhost_gpfifo *convert_cmd;
+        struct nvgpu_gpfifo *convert_cmd;
         int convert_cmd_num_entries;
 
         struct kobj_attribute attr;
@@ -260,7 +260,7 @@ int gk20a_init_cde_support(struct gk20a *g);
 int gk20a_cde_reload(struct gk20a *g);
 int gk20a_cde_convert(struct gk20a *g, struct dma_buf *src, struct dma_buf *dst,
                       s32 dst_kind, u64 dst_word_offset,
-                      u32 dst_size, struct nvhost_fence *fence,
+                      u32 dst_size, struct nvgpu_fence *fence,
                       u32 __flags, struct gk20a_cde_param *params,
                       int num_params, struct gk20a_fence **fence_out);
 void gk20a_cde_debugfs_init(struct platform_device *dev);
@@ -269,7 +269,7 @@ int gk20a_prepare_compressible_read(
         struct gk20a *g, u32 buffer_fd, u32 request, u64 offset,
         u64 compbits_hoffset, u64 compbits_voffset,
         u32 width, u32 height, u32 block_height_log2,
-        u32 submit_flags, struct nvhost_fence *fence,
+        u32 submit_flags, struct nvgpu_fence *fence,
         u32 *valid_compbits, u32 *zbc_color,
         struct gk20a_fence **fence_out);
 int gk20a_mark_compressible_write(
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 34c95483..0e8eb497 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1,6 +1,4 @@
 /*
- * drivers/video/tegra/host/gk20a/channel_gk20a.c
- *
  * GK20A Graphics channel
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/nvhost.h>
@@ -497,15 +494,15 @@ static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 }
 
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
-                       struct nvhost_cycle_stats_args *args)
+                       struct nvgpu_cycle_stats_args *args)
 {
         struct dma_buf *dmabuf;
         void *virtual_address;
 
-        if (args->nvmap_handle && !ch->cyclestate.cyclestate_buffer_handler) {
+        if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
 
                 /* set up new cyclestats buffer */
-                dmabuf = dma_buf_get(args->nvmap_handle);
+                dmabuf = dma_buf_get(args->dmabuf_fd);
                 if (IS_ERR(dmabuf))
                         return PTR_ERR(dmabuf);
                 virtual_address = dma_buf_vmap(dmabuf);
@@ -517,12 +514,12 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
                 ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
                 return 0;
 
-        } else if (!args->nvmap_handle &&
+        } else if (!args->dmabuf_fd &&
                         ch->cyclestate.cyclestate_buffer_handler) {
                 gk20a_free_cycle_stats_buffer(ch);
                 return 0;
 
-        } else if (!args->nvmap_handle &&
+        } else if (!args->dmabuf_fd &&
                         !ch->cyclestate.cyclestate_buffer_handler) {
                 /* no requst from GL */
                 return 0;
@@ -535,7 +532,7 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 #endif
 
 static int gk20a_init_error_notifier(struct channel_gk20a *ch,
-                struct nvhost_set_error_notifier *args) {
+                struct nvgpu_set_error_notifier *args) {
         void *va;
 
         struct dma_buf *dmabuf;
@@ -566,7 +563,7 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
         ch->error_notifier_ref = dmabuf;
         ch->error_notifier = va + args->offset;
         ch->error_notifier_va = va;
-        memset(ch->error_notifier, 0, sizeof(struct nvhost_notification));
+        memset(ch->error_notifier, 0, sizeof(struct nvgpu_notification));
         return 0;
 }
 
@@ -1104,8 +1101,9 @@ static void recycle_priv_cmdbuf(struct channel_gk20a *c)
         gk20a_dbg_fn("done");
 }
 
+
 int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
-                               struct nvhost_alloc_gpfifo_args *args)
+                               struct nvgpu_alloc_gpfifo_args *args)
 {
         struct gk20a *g = c->g;
         struct device *d = dev_from_gk20a(g);
@@ -1119,7 +1117,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
            and another one after, for internal usage. Triple the requested size. */
         gpfifo_size = roundup_pow_of_two(args->num_entries * 3);
 
-        if (args->flags & NVHOST_ALLOC_GPFIFO_FLAGS_VPR_ENABLED)
+        if (args->flags & NVGPU_ALLOC_GPFIFO_FLAGS_VPR_ENABLED)
                 c->vpr = true;
 
         /* an address space needs to have been bound at this point. */
@@ -1496,10 +1494,10 @@ void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
 }
 
 int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
-                                struct nvhost_gpfifo *gpfifo,
+                                struct nvgpu_gpfifo *gpfifo,
                                 u32 num_entries,
                                 u32 flags,
-                                struct nvhost_fence *fence,
+                                struct nvgpu_fence *fence,
                                 struct gk20a_fence **fence_out)
 {
         struct gk20a *g = c->g;
@@ -1514,13 +1512,13 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
         /* we might need two extra gpfifo entries - one for pre fence
          * and one for post fence. */
         const int extra_entries = 2;
-        bool need_wfi = !(flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI);
+        bool need_wfi = !(flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SUPPRESS_WFI);
 
         if (c->has_timedout)
                 return -ETIMEDOUT;
 
-        if ((flags & (NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
-                      NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) &&
+        if ((flags & (NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
+                      NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) &&
             !fence)
                 return -EINVAL;
 
@@ -1551,7 +1549,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
                                   c->hw_chid,
                                   num_entries,
                                   flags,
-                                  fence ? fence->syncpt_id : 0,
+                                  fence ? fence->id : 0,
                                   fence ? fence->value : 0);
         check_gp_put(g, c);
         update_gp_get(g, c);
@@ -1603,13 +1601,13 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
          * the only reason this isn't being unceremoniously killed is to
          * keep running some tests which trigger this condition
          */
-        if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT) {
-                if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
-                        wait_fence_fd = fence->syncpt_id;
+        if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT) {
+                if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
+                        wait_fence_fd = fence->id;
                         err = c->sync->wait_fd(c->sync, wait_fence_fd,
                                         &wait_cmd, &pre_fence);
                 } else {
-                        err = c->sync->wait_syncpt(c->sync, fence->syncpt_id,
+                        err = c->sync->wait_syncpt(c->sync, fence->id,
                                         fence->value, &wait_cmd, &pre_fence);
                 }
         }
@@ -1621,7 +1619,7 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
         /* always insert syncpt increment at end of gpfifo submission
            to keep track of method completion for idle railgating */
-        if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
+        if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET)
                 err = c->sync->incr_user(c->sync, wait_fence_fd, &incr_cmd,
                                          &post_fence, need_wfi);
         else
@@ -1822,7 +1820,7 @@ cleanup_put:
 }
 
 static int gk20a_channel_wait(struct channel_gk20a *ch,
-                              struct nvhost_wait_args *args)
+                              struct nvgpu_wait_args *args)
 {
         struct device *d = dev_from_gk20a(ch->g);
         struct dma_buf *dmabuf;
@@ -1839,14 +1837,14 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
         if (ch->has_timedout)
                 return -ETIMEDOUT;
 
-        if (args->timeout == NVHOST_NO_TIMEOUT)
+        if (args->timeout == NVGPU_NO_TIMEOUT)
                 timeout = MAX_SCHEDULE_TIMEOUT;
         else
                 timeout = (u32)msecs_to_jiffies(args->timeout);
 
         switch (args->type) {
-        case NVHOST_WAIT_TYPE_NOTIFIER:
-                id = args->condition.notifier.nvmap_handle;
+        case NVGPU_WAIT_TYPE_NOTIFIER:
+                id = args->condition.notifier.dmabuf_fd;
                 offset = args->condition.notifier.offset;
 
                 dmabuf = dma_buf_get(id);
@@ -1891,9 +1889,9 @@ notif_clean_up:
         dma_buf_vunmap(dmabuf, notif);
         return ret;
 
-        case NVHOST_WAIT_TYPE_SEMAPHORE:
+        case NVGPU_WAIT_TYPE_SEMAPHORE:
                 ret = gk20a_channel_wait_semaphore(ch,
-                                args->condition.semaphore.nvmap_handle,
+                                args->condition.semaphore.dmabuf_fd,
                                 args->condition.semaphore.offset,
                                 args->condition.semaphore.payload,
                                 timeout);
@@ -1948,7 +1946,7 @@ static void gk20a_channel_events_clear(struct channel_gk20a_poll_events *ev)
 }
 
 static int gk20a_channel_events_ctrl(struct channel_gk20a *ch,
-                          struct nvhost_channel_events_ctrl_args *args)
+                          struct nvgpu_channel_events_ctrl_args *args)
 {
         int ret = 0;
 
@@ -1956,15 +1954,15 @@ static int gk20a_channel_events_ctrl(struct channel_gk20a *ch,
1956 "channel events ctrl cmd %d", args->cmd); 1954 "channel events ctrl cmd %d", args->cmd);
1957 1955
1958 switch (args->cmd) { 1956 switch (args->cmd) {
1959 case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_ENABLE: 1957 case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_ENABLE:
1960 gk20a_channel_events_enable(&ch->poll_events); 1958 gk20a_channel_events_enable(&ch->poll_events);
1961 break; 1959 break;
1962 1960
1963 case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_DISABLE: 1961 case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_DISABLE:
1964 gk20a_channel_events_disable(&ch->poll_events); 1962 gk20a_channel_events_disable(&ch->poll_events);
1965 break; 1963 break;
1966 1964
1967 case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL_CMD_CLEAR: 1965 case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL_CMD_CLEAR:
1968 gk20a_channel_events_clear(&ch->poll_events); 1966 gk20a_channel_events_clear(&ch->poll_events);
1969 break; 1967 break;
1970 1968
@@ -2027,15 +2025,15 @@ static int gk20a_channel_set_priority(struct channel_gk20a *ch,
         u32 timeslice_timeout;
         /* set priority of graphics channel */
         switch (priority) {
-        case NVHOST_PRIORITY_LOW:
+        case NVGPU_PRIORITY_LOW:
                 /* 64 << 3 = 512us */
                 timeslice_timeout = 64;
                 break;
-        case NVHOST_PRIORITY_MEDIUM:
+        case NVGPU_PRIORITY_MEDIUM:
                 /* 128 << 3 = 1024us */
                 timeslice_timeout = 128;
                 break;
-        case NVHOST_PRIORITY_HIGH:
+        case NVGPU_PRIORITY_HIGH:
                 /* 255 << 3 = 2048us */
                 timeslice_timeout = 255;
                 break;
@@ -2049,7 +2047,7 @@ static int gk20a_channel_set_priority(struct channel_gk20a *ch,
 }
 
 static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
-                            struct nvhost_zcull_bind_args *args)
+                            struct nvgpu_zcull_bind_args *args)
 {
         struct gk20a *g = ch->g;
         struct gr_gk20a *gr = &g->gr;
@@ -2145,7 +2143,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g)
 
 static int gk20a_ioctl_channel_submit_gpfifo(
         struct channel_gk20a *ch,
-        struct nvhost_submit_gpfifo_args *args)
+        struct nvgpu_submit_gpfifo_args *args)
 {
         struct gk20a_fence *fence_out;
         void *gpfifo;
@@ -2157,7 +2155,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
         if (ch->has_timedout)
                 return -ETIMEDOUT;
 
-        size = args->num_entries * sizeof(struct nvhost_gpfifo);
+        size = args->num_entries * sizeof(struct nvgpu_gpfifo);
 
         gpfifo = kzalloc(size, GFP_KERNEL);
         if (!gpfifo)
@@ -2177,15 +2175,15 @@ static int gk20a_ioctl_channel_submit_gpfifo(
                 goto clean_up;
 
         /* Convert fence_out to something we can pass back to user space. */
-        if (args->flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
-                if (args->flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
+        if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
+                if (args->flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
                         int fd = gk20a_fence_install_fd(fence_out);
                         if (fd < 0)
                                 ret = fd;
                         else
-                                args->fence.syncpt_id = fd;
+                                args->fence.id = fd;
                 } else {
-                        args->fence.syncpt_id = fence_out->syncpt_id;
+                        args->fence.id = fence_out->syncpt_id;
                         args->fence.value = fence_out->syncpt_value;
                 }
         }
@@ -2211,15 +2209,15 @@ long gk20a_channel_ioctl(struct file *filp,
 {
         struct channel_gk20a *ch = filp->private_data;
         struct platform_device *dev = ch->g->dev;
-        u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
+        u8 buf[NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE];
         int err = 0;
 
         gk20a_dbg_fn("start %d", _IOC_NR(cmd));
 
-        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
+        if ((_IOC_TYPE(cmd) != NVGPU_IOCTL_MAGIC) ||
             (_IOC_NR(cmd) == 0) ||
-            (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
-            (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
+            (_IOC_NR(cmd) > NVGPU_IOCTL_CHANNEL_LAST) ||
+            (_IOC_SIZE(cmd) > NVGPU_IOCTL_CHANNEL_MAX_ARG_SIZE))
                 return -EINVAL;
 
         if (_IOC_DIR(cmd) & _IOC_WRITE) {
@@ -2228,7 +2226,7 @@ long gk20a_channel_ioctl(struct file *filp,
         }
 
         switch (cmd) {
-        case NVHOST_IOCTL_CHANNEL_OPEN:
+        case NVGPU_IOCTL_CHANNEL_OPEN:
         {
                 int fd;
                 struct file *file;
@@ -2263,12 +2261,12 @@ long gk20a_channel_ioctl(struct file *filp,
                         break;
                 }
 
-                ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
+                ((struct nvgpu_channel_open_args *)buf)->channel_fd = fd;
                 break;
         }
-        case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
+        case NVGPU_IOCTL_CHANNEL_SET_NVMAP_FD:
                 break;
-        case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
+        case NVGPU_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2277,10 +2275,10 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = ch->g->ops.gr.alloc_obj_ctx(ch,
-                                (struct nvhost_alloc_obj_ctx_args *)buf);
+                                (struct nvgpu_alloc_obj_ctx_args *)buf);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
+        case NVGPU_IOCTL_CHANNEL_FREE_OBJ_CTX:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2289,10 +2287,10 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = ch->g->ops.gr.free_obj_ctx(ch,
-                                (struct nvhost_free_obj_ctx_args *)buf);
+                                (struct nvgpu_free_obj_ctx_args *)buf);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
+        case NVGPU_IOCTL_CHANNEL_ALLOC_GPFIFO:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2301,14 +2299,14 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = gk20a_alloc_channel_gpfifo(ch,
-                                (struct nvhost_alloc_gpfifo_args *)buf);
+                                (struct nvgpu_alloc_gpfifo_args *)buf);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
+        case NVGPU_IOCTL_CHANNEL_SUBMIT_GPFIFO:
                 err = gk20a_ioctl_channel_submit_gpfifo(ch,
-                                (struct nvhost_submit_gpfifo_args *)buf);
+                                (struct nvgpu_submit_gpfifo_args *)buf);
                 break;
-        case NVHOST_IOCTL_CHANNEL_WAIT:
+        case NVGPU_IOCTL_CHANNEL_WAIT:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2317,10 +2315,10 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = gk20a_channel_wait(ch,
-                                (struct nvhost_wait_args *)buf);
+                                (struct nvgpu_wait_args *)buf);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
+        case NVGPU_IOCTL_CHANNEL_ZCULL_BIND:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2329,10 +2327,10 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = gk20a_channel_zcull_bind(ch,
-                                (struct nvhost_zcull_bind_args *)buf);
+                                (struct nvgpu_zcull_bind_args *)buf);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
+        case NVGPU_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2341,11 +2339,11 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = gk20a_init_error_notifier(ch,
-                                (struct nvhost_set_error_notifier *)buf);
+                                (struct nvgpu_set_error_notifier *)buf);
                 gk20a_idle(dev);
                 break;
 #ifdef CONFIG_GK20A_CYCLE_STATS
-        case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
+        case NVGPU_IOCTL_CHANNEL_CYCLE_STATS:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2354,37 +2352,37 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 err = gk20a_channel_cycle_stats(ch,
-                                (struct nvhost_cycle_stats_args *)buf);
+                                (struct nvgpu_cycle_stats_args *)buf);
                 gk20a_idle(dev);
                 break;
 #endif
-        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
+        case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT:
         {
                 u32 timeout =
-                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+                        (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
                 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
                            timeout, ch->hw_chid);
                 ch->timeout_ms_max = timeout;
                 break;
         }
-        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
+        case NVGPU_IOCTL_CHANNEL_SET_TIMEOUT_EX:
         {
                 u32 timeout =
-                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+                        (u32)((struct nvgpu_set_timeout_args *)buf)->timeout;
                 bool timeout_debug_dump = !((u32)
-                        ((struct nvhost_set_timeout_ex_args *)buf)->flags &
-                        (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
+                        ((struct nvgpu_set_timeout_ex_args *)buf)->flags &
+                        (1 << NVGPU_TIMEOUT_FLAG_DISABLE_DUMP));
                 gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
                            timeout, ch->hw_chid);
                 ch->timeout_ms_max = timeout;
                 ch->timeout_debug_dump = timeout_debug_dump;
                 break;
         }
-        case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
-                ((struct nvhost_get_param_args *)buf)->value =
+        case NVGPU_IOCTL_CHANNEL_GET_TIMEDOUT:
+                ((struct nvgpu_get_param_args *)buf)->value =
                         ch->has_timedout;
                 break;
-        case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
+        case NVGPU_IOCTL_CHANNEL_SET_PRIORITY:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2393,10 +2391,10 @@ long gk20a_channel_ioctl(struct file *filp,
                         return err;
                 }
                 gk20a_channel_set_priority(ch,
-                        ((struct nvhost_set_priority_args *)buf)->priority);
+                        ((struct nvgpu_set_priority_args *)buf)->priority);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_ENABLE:
+        case NVGPU_IOCTL_CHANNEL_ENABLE:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2410,7 +2408,7 @@ long gk20a_channel_ioctl(struct file *filp,
                          ccsr_channel_enable_set_true_f());
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_DISABLE:
+        case NVGPU_IOCTL_CHANNEL_DISABLE:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2424,7 +2422,7 @@ long gk20a_channel_ioctl(struct file *filp,
                          ccsr_channel_enable_clr_true_f());
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_PREEMPT:
+        case NVGPU_IOCTL_CHANNEL_PREEMPT:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2435,7 +2433,7 @@ long gk20a_channel_ioctl(struct file *filp,
                 err = gk20a_fifo_preempt(ch->g, ch);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_FORCE_RESET:
+        case NVGPU_IOCTL_CHANNEL_FORCE_RESET:
                 err = gk20a_busy(dev);
                 if (err) {
                         dev_err(&dev->dev,
@@ -2446,9 +2444,9 @@ long gk20a_channel_ioctl(struct file *filp,
                 err = gk20a_fifo_force_reset_ch(ch, true);
                 gk20a_idle(dev);
                 break;
-        case NVHOST_IOCTL_CHANNEL_EVENTS_CTRL:
+        case NVGPU_IOCTL_CHANNEL_EVENTS_CTRL:
                 err = gk20a_channel_events_ctrl(ch,
-                        (struct nvhost_channel_events_ctrl_args *)buf);
+                        (struct nvgpu_channel_events_ctrl_args *)buf);
                 break;
         default:
                 dev_dbg(&dev->dev, "unrecognized ioctl cmd: 0x%x", cmd);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index bb9f314c..ff056140 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -1,6 +1,4 @@
 /*
- * drivers/video/tegra/host/gk20a/channel_gk20a.h
- *
  * GK20A graphics channel
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,12 +12,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#ifndef __CHANNEL_GK20A_H__
-#define __CHANNEL_GK20A_H__
+#ifndef CHANNEL_GK20A_H
+#define CHANNEL_GK20A_H
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -37,6 +34,7 @@ struct gk20a_fence;
 
 #include "mm_gk20a.h"
 #include "gr_gk20a.h"
+#include "fence_gk20a.h"
 
 struct gpfifo {
         u32 entry0;
@@ -148,7 +146,7 @@ struct channel_gk20a {
         bool timeout_debug_dump;
 
         struct dma_buf *error_notifier_ref;
-        struct nvhost_notification *error_notifier;
+        struct nvgpu_notification *error_notifier;
         void *error_notifier_va;
 
         struct gk20a_channel_sync *sync;
@@ -201,14 +199,14 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g);
 void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a);
 
 int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
-                                struct nvhost_gpfifo *gpfifo,
+                                struct nvgpu_gpfifo *gpfifo,
                                 u32 num_entries,
                                 u32 flags,
-                                struct nvhost_fence *fence,
+                                struct nvgpu_fence *fence,
                                 struct gk20a_fence **fence_out);
 
 int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
-                               struct nvhost_alloc_gpfifo_args *args);
+                               struct nvgpu_alloc_gpfifo_args *args);
 
 void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a);
 void channel_gk20a_disable(struct channel_gk20a *ch);
@@ -217,4 +215,4 @@ void channel_gk20a_free_inst(struct gk20a *g, struct channel_gk20a *ch);
 int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
                               u64 gpfifo_base, u32 gpfifo_entries);
 
-#endif /*__CHANNEL_GK20A_H__*/
+#endif /* CHANNEL_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
index ed54ba7a..255c1a7c 100644
--- a/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/clk_gk20a.h
@@ -1,8 +1,4 @@
 /*
- * drivers/video/tegra/host/gk20a/clk_gk20a.h
- *
- * GK20A Graphics
- *
  * Copyright (c) 2011 - 2014, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -14,12 +10,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#ifndef _NVHOST_CLK_GK20A_H_
-#define _NVHOST_CLK_GK20A_H_
+#ifndef CLK_GK20A_H
+#define CLK_GK20A_H
 
 #include <linux/mutex.h>
 
@@ -102,4 +97,4 @@ static inline unsigned long rate_gpu_to_gpc2clk(unsigned long rate)
102 return (rate * 2) / KHZ; 97 return (rate * 2) / KHZ;
103} 98}
104 99
105#endif /* _NVHOST_CLK_GK20A_H_ */ 100#endif /* CLK_GK20A_H */
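
Note that besides swapping nvhost for nvgpu, the series drops the underscore-wrapped guard names everywhere: identifiers beginning with an underscore followed by an uppercase letter are reserved for the implementation in C, so _NVHOST_CLK_GK20A_H_ was never a conforming choice. The replacement pattern used throughout:

#ifndef CLK_GK20A_H
#define CLK_GK20A_H

/* declarations */

#endif /* CLK_GK20A_H */
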
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index d1560cad..0feb92a5 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * GK20A Ctrl
3 *
4 * Copyright (c) 2011-2014, NVIDIA Corporation. All rights reserved. 2 * Copyright (c) 2011-2014, NVIDIA Corporation. All rights reserved.
5 * 3 *
6 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -57,9 +55,9 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
57static long 55static long
58gk20a_ctrl_ioctl_gpu_characteristics( 56gk20a_ctrl_ioctl_gpu_characteristics(
59 struct gk20a *g, 57 struct gk20a *g,
60 struct nvhost_gpu_get_characteristics *request) 58 struct nvgpu_gpu_get_characteristics *request)
61{ 59{
62 struct nvhost_gpu_characteristics *pgpu = &g->gpu_characteristics; 60 struct nvgpu_gpu_characteristics *pgpu = &g->gpu_characteristics;
63 long err = 0; 61 long err = 0;
64 62
65 if (request->gpu_characteristics_buf_size > 0) { 63 if (request->gpu_characteristics_buf_size > 0) {
@@ -81,14 +79,14 @@ gk20a_ctrl_ioctl_gpu_characteristics(
81 79
82static int gk20a_ctrl_prepare_compressible_read( 80static int gk20a_ctrl_prepare_compressible_read(
83 struct gk20a *g, 81 struct gk20a *g,
84 struct nvhost_gpu_prepare_compressible_read_args *args) 82 struct nvgpu_gpu_prepare_compressible_read_args *args)
85{ 83{
86 struct nvhost_fence fence; 84 struct nvgpu_fence fence;
87 struct gk20a_fence *fence_out = NULL; 85 struct gk20a_fence *fence_out = NULL;
88 int ret = 0; 86 int ret = 0;
89 int flags = args->submit_flags; 87 int flags = args->submit_flags;
90 88
91 fence.syncpt_id = args->fence.syncpt_id; 89 fence.id = args->fence.syncpt_id;
92 fence.value = args->fence.syncpt_value; 90 fence.value = args->fence.syncpt_value;
93 91
94 ret = gk20a_busy(g->dev); 92 ret = gk20a_busy(g->dev);
@@ -107,8 +105,8 @@ static int gk20a_ctrl_prepare_compressible_read(
107 return ret; 105 return ret;
108 106
109 /* Convert fence_out to something we can pass back to user space. */ 107 /* Convert fence_out to something we can pass back to user space. */
110 if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET) { 108 if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_FENCE_GET) {
111 if (flags & NVHOST_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) { 109 if (flags & NVGPU_SUBMIT_GPFIFO_FLAGS_SYNC_FENCE) {
112 if (fence_out) { 110 if (fence_out) {
113 int fd = gk20a_fence_install_fd(fence_out); 111 int fd = gk20a_fence_install_fd(fence_out);
114 if (fd < 0) 112 if (fd < 0)
@@ -136,7 +134,7 @@ static int gk20a_ctrl_prepare_compressible_read(
136 134
137static int gk20a_ctrl_mark_compressible_write( 135static int gk20a_ctrl_mark_compressible_write(
138 struct gk20a *g, 136 struct gk20a *g,
139 struct nvhost_gpu_mark_compressible_write_args *args) 137 struct nvgpu_gpu_mark_compressible_write_args *args)
140{ 138{
141 int ret = 0; 139 int ret = 0;
142 140
@@ -154,11 +152,11 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
154{ 152{
155 struct platform_device *dev = filp->private_data; 153 struct platform_device *dev = filp->private_data;
156 struct gk20a *g = get_gk20a(dev); 154 struct gk20a *g = get_gk20a(dev);
157 struct nvhost_gpu_zcull_get_ctx_size_args *get_ctx_size_args; 155 struct nvgpu_gpu_zcull_get_ctx_size_args *get_ctx_size_args;
158 struct nvhost_gpu_zcull_get_info_args *get_info_args; 156 struct nvgpu_gpu_zcull_get_info_args *get_info_args;
159 struct nvhost_gpu_zbc_set_table_args *set_table_args; 157 struct nvgpu_gpu_zbc_set_table_args *set_table_args;
160 struct nvhost_gpu_zbc_query_table_args *query_table_args; 158 struct nvgpu_gpu_zbc_query_table_args *query_table_args;
161 u8 buf[NVHOST_GPU_IOCTL_MAX_ARG_SIZE]; 159 u8 buf[NVGPU_GPU_IOCTL_MAX_ARG_SIZE];
162 struct gr_zcull_info *zcull_info; 160 struct gr_zcull_info *zcull_info;
163 struct zbc_entry *zbc_val; 161 struct zbc_entry *zbc_val;
164 struct zbc_query_params *zbc_tbl; 162 struct zbc_query_params *zbc_tbl;
@@ -169,12 +167,12 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
169 167
170 gk20a_dbg_fn(""); 168 gk20a_dbg_fn("");
171 169
172 if ((_IOC_TYPE(cmd) != NVHOST_GPU_IOCTL_MAGIC) || 170 if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
173 (_IOC_NR(cmd) == 0) || 171 (_IOC_NR(cmd) == 0) ||
174 (_IOC_NR(cmd) > NVHOST_GPU_IOCTL_LAST)) 172 (_IOC_NR(cmd) > NVGPU_GPU_IOCTL_LAST))
175 return -EINVAL; 173 return -EINVAL;
176 174
177 BUG_ON(_IOC_SIZE(cmd) > NVHOST_GPU_IOCTL_MAX_ARG_SIZE); 175 BUG_ON(_IOC_SIZE(cmd) > NVGPU_GPU_IOCTL_MAX_ARG_SIZE);
178 176
179 if (_IOC_DIR(cmd) & _IOC_WRITE) { 177 if (_IOC_DIR(cmd) & _IOC_WRITE) {
180 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) 178 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
@@ -190,16 +188,16 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
190 } 188 }
191 189
192 switch (cmd) { 190 switch (cmd) {
193 case NVHOST_GPU_IOCTL_ZCULL_GET_CTX_SIZE: 191 case NVGPU_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
194 get_ctx_size_args = (struct nvhost_gpu_zcull_get_ctx_size_args *)buf; 192 get_ctx_size_args = (struct nvgpu_gpu_zcull_get_ctx_size_args *)buf;
195 193
196 get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr); 194 get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr);
197 195
198 break; 196 break;
199 case NVHOST_GPU_IOCTL_ZCULL_GET_INFO: 197 case NVGPU_GPU_IOCTL_ZCULL_GET_INFO:
200 get_info_args = (struct nvhost_gpu_zcull_get_info_args *)buf; 198 get_info_args = (struct nvgpu_gpu_zcull_get_info_args *)buf;
201 199
202 memset(get_info_args, 0, sizeof(struct nvhost_gpu_zcull_get_info_args)); 200 memset(get_info_args, 0, sizeof(struct nvgpu_gpu_zcull_get_info_args));
203 201
204 zcull_info = kzalloc(sizeof(struct gr_zcull_info), GFP_KERNEL); 202 zcull_info = kzalloc(sizeof(struct gr_zcull_info), GFP_KERNEL);
205 if (zcull_info == NULL) 203 if (zcull_info == NULL)
@@ -224,8 +222,8 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
224 222
225 kfree(zcull_info); 223 kfree(zcull_info);
226 break; 224 break;
227 case NVHOST_GPU_IOCTL_ZBC_SET_TABLE: 225 case NVGPU_GPU_IOCTL_ZBC_SET_TABLE:
228 set_table_args = (struct nvhost_gpu_zbc_set_table_args *)buf; 226 set_table_args = (struct nvgpu_gpu_zbc_set_table_args *)buf;
229 227
230#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 228#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
231 if (platform->virtual_dev) 229 if (platform->virtual_dev)
@@ -264,8 +262,8 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
264 if (zbc_val) 262 if (zbc_val)
265 kfree(zbc_val); 263 kfree(zbc_val);
266 break; 264 break;
267 case NVHOST_GPU_IOCTL_ZBC_QUERY_TABLE: 265 case NVGPU_GPU_IOCTL_ZBC_QUERY_TABLE:
268 query_table_args = (struct nvhost_gpu_zbc_query_table_args *)buf; 266 query_table_args = (struct nvgpu_gpu_zbc_query_table_args *)buf;
269 267
270 zbc_tbl = kzalloc(sizeof(struct zbc_query_params), GFP_KERNEL); 268 zbc_tbl = kzalloc(sizeof(struct zbc_query_params), GFP_KERNEL);
271 if (zbc_tbl == NULL) 269 if (zbc_tbl == NULL)
@@ -303,17 +301,17 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
303 kfree(zbc_tbl); 301 kfree(zbc_tbl);
304 break; 302 break;
305 303
306 case NVHOST_GPU_IOCTL_GET_CHARACTERISTICS: 304 case NVGPU_GPU_IOCTL_GET_CHARACTERISTICS:
307 err = gk20a_ctrl_ioctl_gpu_characteristics( 305 err = gk20a_ctrl_ioctl_gpu_characteristics(
308 g, (struct nvhost_gpu_get_characteristics *)buf); 306 g, (struct nvgpu_gpu_get_characteristics *)buf);
309 break; 307 break;
310 case NVHOST_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ: 308 case NVGPU_GPU_IOCTL_PREPARE_COMPRESSIBLE_READ:
311 err = gk20a_ctrl_prepare_compressible_read(g, 309 err = gk20a_ctrl_prepare_compressible_read(g,
312 (struct nvhost_gpu_prepare_compressible_read_args *)buf); 310 (struct nvgpu_gpu_prepare_compressible_read_args *)buf);
313 break; 311 break;
314 case NVHOST_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE: 312 case NVGPU_GPU_IOCTL_MARK_COMPRESSIBLE_WRITE:
315 err = gk20a_ctrl_mark_compressible_write(g, 313 err = gk20a_ctrl_mark_compressible_write(g,
316 (struct nvhost_gpu_mark_compressible_write_args *)buf); 314 (struct nvgpu_gpu_mark_compressible_write_args *)buf);
317 break; 315 break;
318 default: 316 default:
319 dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd); 317 dev_dbg(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
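
The dispatcher keeps the standard kernel ioctl marshaling shape: validate the command's magic and ordinal, bound the payload by the size encoded in the command word, copy it onto the stack for _IOC_WRITE commands, dispatch, and copy back for _IOC_READ commands. Distilled, with error paths trimmed:

long nvgpu_style_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	u8 buf[NVGPU_GPU_IOCTL_MAX_ARG_SIZE];
	long err = 0;

	if ((_IOC_TYPE(cmd) != NVGPU_GPU_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_GPU_IOCTL_LAST))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	/* switch (cmd) { ... } operating on buf, as above */

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			return -EFAULT;

	return err;
}

One semantic nuance hides in the rename: nvgpu_fence exposes a generic id where nvhost_fence had syncpt_id, presumably so the same field can later carry non-syncpoint identifiers such as sync fds; the ioctl args still speak syncpt_id, hence the explicit fence.id = args->fence.syncpt_id copy above.
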
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h
index ac9c253e..26ca4e20 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.h
@@ -1,8 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/gk20a_ctrl.h
3 *
4 * GK20A Ctrl
5 *
6 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
7 * 3 *
8 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -14,15 +10,14 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 11 * more details.
16 * 12 *
17 * You should have received a copy of the GNU General Public License along with 13 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 15 */
21#ifndef _NVHOST_GK20A_CTRL_H_ 16#ifndef CTRL_GK20A_H
22#define _NVHOST_GK20A_CTRL_H_ 17#define CTRL_GK20A_H
23 18
24int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp); 19int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp);
25int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp); 20int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp);
26long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 21long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
27 22
28#endif /* _NVHOST_GK20A_CTRL_H_ */ 23#endif /* CTRL_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 1fefb659..2f1a08d8 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -154,7 +154,7 @@ static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
154} 154}
155 155
156static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s, 156static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
157 struct nvhost_dbg_gpu_events_ctrl_args *args) 157 struct nvgpu_dbg_gpu_events_ctrl_args *args)
158{ 158{
159 int ret = 0; 159 int ret = 0;
160 160
@@ -167,15 +167,15 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
167 } 167 }
168 168
169 switch (args->cmd) { 169 switch (args->cmd) {
170 case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_ENABLE: 170 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
171 gk20a_dbg_gpu_events_enable(dbg_s); 171 gk20a_dbg_gpu_events_enable(dbg_s);
172 break; 172 break;
173 173
174 case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_DISABLE: 174 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
175 gk20a_dbg_gpu_events_disable(dbg_s); 175 gk20a_dbg_gpu_events_disable(dbg_s);
176 break; 176 break;
177 177
178 case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_CLEAR: 178 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
179 gk20a_dbg_gpu_events_clear(dbg_s); 179 gk20a_dbg_gpu_events_clear(dbg_s);
180 break; 180 break;
181 181
@@ -278,7 +278,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
278 * which called powergate disable ioctl, to be killed without calling 278 * which called powergate disable ioctl, to be killed without calling
279 * powergate enable ioctl 279 * powergate enable ioctl
280 */ 280 */
281 dbg_set_powergate(dbg_s, NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE); 281 dbg_set_powergate(dbg_s, NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
282 282
283 dbg_s->ch = NULL; 283 dbg_s->ch = NULL;
284 fput(dbg_s->ch_f); 284 fput(dbg_s->ch_f);
@@ -307,7 +307,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
307} 307}
308 308
309static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s, 309static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
310 struct nvhost_dbg_gpu_bind_channel_args *args) 310 struct nvgpu_dbg_gpu_bind_channel_args *args)
311{ 311{
312 struct file *f; 312 struct file *f;
313 struct gk20a *g; 313 struct gk20a *g;
@@ -350,31 +350,31 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
350 return 0; 350 return 0;
351} 351}
352 352
353static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, 353static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
354 struct nvhost_dbg_gpu_exec_reg_ops_args *args); 354 struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
355 355
356static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s, 356static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
357 struct nvhost_dbg_gpu_powergate_args *args); 357 struct nvgpu_dbg_gpu_powergate_args *args);
358 358
359static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, 359static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
360 struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *args); 360 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
361 361
362long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, 362long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
363 unsigned long arg) 363 unsigned long arg)
364{ 364{
365 struct dbg_session_gk20a *dbg_s = filp->private_data; 365 struct dbg_session_gk20a *dbg_s = filp->private_data;
366 struct gk20a *g = get_gk20a(dbg_s->pdev); 366 struct gk20a *g = get_gk20a(dbg_s->pdev);
367 u8 buf[NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE]; 367 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
368 int err = 0; 368 int err = 0;
369 369
370 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 370 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
371 371
372 if ((_IOC_TYPE(cmd) != NVHOST_DBG_GPU_IOCTL_MAGIC) || 372 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
373 (_IOC_NR(cmd) == 0) || 373 (_IOC_NR(cmd) == 0) ||
374 (_IOC_NR(cmd) > NVHOST_DBG_GPU_IOCTL_LAST)) 374 (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST))
375 return -EINVAL; 375 return -EINVAL;
376 376
377 BUG_ON(_IOC_SIZE(cmd) > NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE); 377 BUG_ON(_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE);
378 378
379 if (_IOC_DIR(cmd) & _IOC_WRITE) { 379 if (_IOC_DIR(cmd) & _IOC_WRITE) {
380 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) 380 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
@@ -390,32 +390,32 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
390 } 390 }
391 391
392 switch (cmd) { 392 switch (cmd) {
393 case NVHOST_DBG_GPU_IOCTL_BIND_CHANNEL: 393 case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
394 err = dbg_bind_channel_gk20a(dbg_s, 394 err = dbg_bind_channel_gk20a(dbg_s,
395 (struct nvhost_dbg_gpu_bind_channel_args *)buf); 395 (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
396 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 396 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
397 break; 397 break;
398 398
399 case NVHOST_DBG_GPU_IOCTL_REG_OPS: 399 case NVGPU_DBG_GPU_IOCTL_REG_OPS:
400 err = nvhost_ioctl_channel_reg_ops(dbg_s, 400 err = nvgpu_ioctl_channel_reg_ops(dbg_s,
401 (struct nvhost_dbg_gpu_exec_reg_ops_args *)buf); 401 (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
402 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 402 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
403 break; 403 break;
404 404
405 case NVHOST_DBG_GPU_IOCTL_POWERGATE: 405 case NVGPU_DBG_GPU_IOCTL_POWERGATE:
406 err = nvhost_ioctl_powergate_gk20a(dbg_s, 406 err = nvgpu_ioctl_powergate_gk20a(dbg_s,
407 (struct nvhost_dbg_gpu_powergate_args *)buf); 407 (struct nvgpu_dbg_gpu_powergate_args *)buf);
408 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err); 408 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
409 break; 409 break;
410 410
411 case NVHOST_DBG_GPU_IOCTL_EVENTS_CTRL: 411 case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
412 err = gk20a_dbg_gpu_events_ctrl(dbg_s, 412 err = gk20a_dbg_gpu_events_ctrl(dbg_s,
413 (struct nvhost_dbg_gpu_events_ctrl_args *)buf); 413 (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
414 break; 414 break;
415 415
416 case NVHOST_DBG_GPU_IOCTL_SMPC_CTXSW_MODE: 416 case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
417 err = nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s, 417 err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
418 (struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *)buf); 418 (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
419 break; 419 break;
420 420
421 default: 421 default:
@@ -456,15 +456,15 @@ static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
456 456
457} 457}
458 458
459static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, 459static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
460 struct nvhost_dbg_gpu_exec_reg_ops_args *args) 460 struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
461{ 461{
462 int err = 0, powergate_err = 0; 462 int err = 0, powergate_err = 0;
463 bool is_pg_disabled = false; 463 bool is_pg_disabled = false;
464 464
465 struct device *dev = dbg_s->dev; 465 struct device *dev = dbg_s->dev;
466 struct gk20a *g = get_gk20a(dbg_s->pdev); 466 struct gk20a *g = get_gk20a(dbg_s->pdev);
467 struct nvhost_dbg_gpu_reg_op *ops; 467 struct nvgpu_dbg_gpu_reg_op *ops;
468 u64 ops_size = sizeof(ops[0]) * args->num_ops; 468 u64 ops_size = sizeof(ops[0]) * args->num_ops;
469 469
470 gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size); 470 gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
@@ -506,7 +506,7 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
506 506
507 if (!dbg_s->is_pg_disabled) { 507 if (!dbg_s->is_pg_disabled) {
508 powergate_err = dbg_set_powergate(dbg_s, 508 powergate_err = dbg_set_powergate(dbg_s,
509 NVHOST_DBG_GPU_POWERGATE_MODE_DISABLE); 509 NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
510 is_pg_disabled = true; 510 is_pg_disabled = true;
511 } 511 }
512 512
@@ -515,7 +515,7 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
515 /* enable powergate, if previously disabled */ 515 /* enable powergate, if previously disabled */
516 if (is_pg_disabled) { 516 if (is_pg_disabled) {
517 powergate_err = dbg_set_powergate(dbg_s, 517 powergate_err = dbg_set_powergate(dbg_s,
518 NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE); 518 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
519 } 519 }
520 } 520 }
521 521
@@ -554,7 +554,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
554 dev_name(dbg_s->dev), powermode); 554 dev_name(dbg_s->dev), powermode);
555 555
556 switch (powermode) { 556 switch (powermode) {
557 case NVHOST_DBG_GPU_POWERGATE_MODE_DISABLE: 557 case NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE:
558 /* save off current powergate, clk state. 558 /* save off current powergate, clk state.
559 * set gpu module's can_powergate = 0. 559 * set gpu module's can_powergate = 0.
560 * set gpu module's clk to max. 560 * set gpu module's clk to max.
@@ -595,7 +595,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
595 dbg_s->is_pg_disabled = true; 595 dbg_s->is_pg_disabled = true;
596 break; 596 break;
597 597
598 case NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE: 598 case NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE:
599 /* restore (can) powergate, clk state */ 599 /* restore (can) powergate, clk state */
600 /* release pending exceptions to fault/be handled as usual */ 600 /* release pending exceptions to fault/be handled as usual */
601 /*TBD: ordering of these? */ 601 /*TBD: ordering of these? */
@@ -640,8 +640,8 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
640 return err; 640 return err;
641} 641}
642 642
643static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s, 643static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
644 struct nvhost_dbg_gpu_powergate_args *args) 644 struct nvgpu_dbg_gpu_powergate_args *args)
645{ 645{
646 int err; 646 int err;
647 struct gk20a *g = get_gk20a(dbg_s->pdev); 647 struct gk20a *g = get_gk20a(dbg_s->pdev);
@@ -654,8 +654,8 @@ static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
654 return err; 654 return err;
655} 655}
656 656
657static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s, 657static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
658 struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *args) 658 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
659{ 659{
660 int err; 660 int err;
661 struct gk20a *g = get_gk20a(dbg_s->pdev); 661 struct gk20a *g = get_gk20a(dbg_s->pdev);
@@ -677,7 +677,7 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
677 } 677 }
678 678
679 err = gr_gk20a_update_smpc_ctxsw_mode(g, ch_gk20a, 679 err = gr_gk20a_update_smpc_ctxsw_mode(g, ch_gk20a,
680 args->mode == NVHOST_DBG_GPU_SMPC_CTXSW_MODE_CTXSW); 680 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
681 if (err) { 681 if (err) {
682 gk20a_err(dev_from_gk20a(dbg_s->g), 682 gk20a_err(dev_from_gk20a(dbg_s->g),
683 "error (%d) during smpc ctxsw mode update\n", err); 683 "error (%d) during smpc ctxsw mode update\n", err);
@@ -688,12 +688,12 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
688 * it was already swapped in/out once or not, etc. 688 * it was already swapped in/out once or not, etc.
689 */ 689 */
690 { 690 {
691 struct nvhost_dbg_gpu_reg_op ops[4]; 691 struct nvgpu_dbg_gpu_reg_op ops[4];
692 int i; 692 int i;
693 for (i = 0; i < ARRAY_SIZE(ops); i++) { 693 for (i = 0; i < ARRAY_SIZE(ops); i++) {
694 ops[i].op = NVHOST_DBG_GPU_REG_OP_WRITE_32; 694 ops[i].op = NVGPU_DBG_GPU_REG_OP_WRITE_32;
695 ops[i].type = NVHOST_DBG_GPU_REG_OP_TYPE_GR_CTX; 695 ops[i].type = NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX;
696 ops[i].status = NVHOST_DBG_GPU_REG_OP_STATUS_SUCCESS; 696 ops[i].status = NVGPU_DBG_GPU_REG_OP_STATUS_SUCCESS;
697 ops[i].value_hi = 0; 697 ops[i].value_hi = 0;
698 ops[i].and_n_mask_lo = 0; 698 ops[i].and_n_mask_lo = 0;
699 ops[i].and_n_mask_hi = 0; 699 ops[i].and_n_mask_hi = 0;
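
The smoke-test block fills an array of the renamed reg ops; nvgpu_ioctl_channel_reg_ops() also brackets each batch with POWERGATE_MODE_DISABLE/ENABLE so the target registers stay powered while they are poked. For reference, one 32-bit context write looks like the sketch below; the offset and value_lo fields are assumed from the usual reg-op layout, since these hunks only show the renamed constants:

struct nvgpu_dbg_gpu_reg_op op = {
	.op            = NVGPU_DBG_GPU_REG_OP_WRITE_32,
	.type          = NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX,
	.status        = NVGPU_DBG_GPU_REG_OP_STATUS_SUCCESS,
	.offset        = 0x0,  /* placeholder register offset (assumed field) */
	.value_lo      = 0x0,  /* placeholder value (assumed field) */
	.value_hi      = 0,    /* unused for 32-bit ops */
	.and_n_mask_lo = 0,    /* 0 = plain write, no read-modify-write */
	.and_n_mask_hi = 0,
};
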
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
index 49827608..27084c0d 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
@@ -15,8 +15,8 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#ifndef __DBG_GPU_GK20A_H_ 18#ifndef DBG_GPU_GK20A_H
19#define __DBG_GPU_GK20A_H_ 19#define DBG_GPU_GK20A_H
20#include <linux/poll.h> 20#include <linux/poll.h>
21 21
22/* module debug driver interface */ 22/* module debug driver interface */
@@ -33,7 +33,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch);
33 33
34struct dbg_gpu_session_ops { 34struct dbg_gpu_session_ops {
35 int (*exec_reg_ops)(struct dbg_session_gk20a *dbg_s, 35 int (*exec_reg_ops)(struct dbg_session_gk20a *dbg_s,
36 struct nvhost_dbg_gpu_reg_op *ops, 36 struct nvgpu_dbg_gpu_reg_op *ops,
37 u64 num_ops); 37 u64 num_ops);
38}; 38};
39 39
@@ -80,4 +80,4 @@ struct dbg_session_gk20a {
80 80
81extern struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a; 81extern struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a;
82 82
83#endif /* __DBG_GPU_GK20A_H_ */ 83#endif /* DBG_GPU_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index f5e0b73d..f41d883f 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -14,7 +14,10 @@
14 * 14 *
15 */ 15 */
16 16
17#ifdef CONFIG_TEGRA_GK20A
17#include <linux/nvhost.h> 18#include <linux/nvhost.h>
19#endif
20
18#include <linux/debugfs.h> 21#include <linux/debugfs.h>
19#include <linux/seq_file.h> 22#include <linux/seq_file.h>
20 23
@@ -141,6 +144,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
141 gk20a_mem_rd32(inst_ptr, ram_fc_semaphorec_w()), 144 gk20a_mem_rd32(inst_ptr, ram_fc_semaphorec_w()),
142 gk20a_mem_rd32(inst_ptr, ram_fc_semaphored_w())); 145 gk20a_mem_rd32(inst_ptr, ram_fc_semaphored_w()));
143 146
147#ifdef CONFIG_TEGRA_GK20A
144 if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v()) 148 if ((pbdma_syncpointb_op_v(syncpointb) == pbdma_syncpointb_op_wait_v())
145 && (pbdma_syncpointb_wait_switch_v(syncpointb) == 149 && (pbdma_syncpointb_wait_switch_v(syncpointb) ==
146 pbdma_syncpointb_wait_switch_en_v())) 150 pbdma_syncpointb_wait_switch_en_v()))
@@ -150,6 +154,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
150 nvhost_syncpt_get_name(g->host1x_dev, 154 nvhost_syncpt_get_name(g->host1x_dev,
151 pbdma_syncpointb_syncpt_index_v(syncpointb)), 155 pbdma_syncpointb_syncpt_index_v(syncpointb)),
152 pbdma_syncpointa_payload_v(syncpointa)); 156 pbdma_syncpointa_payload_v(syncpointa));
157#endif
153 158
154 gk20a_debug_output(o, "\n"); 159 gk20a_debug_output(o, "\n");
155} 160}
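
This hunk is the first visible payoff of the rename series: linux/nvhost.h and the syncpoint-name decode are now compiled only when host1x support is configured, so a non-Tegra build of the debug dump no longer touches nvhost at all. The guard pattern, reduced to its skeleton:

#ifdef CONFIG_TEGRA_GK20A
#include <linux/nvhost.h>
#endif

/* ... */

#ifdef CONFIG_TEGRA_GK20A
	/* host1x-only decode, e.g. nvhost_syncpt_get_name() as above */
#endif
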
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.h b/drivers/gpu/nvgpu/gk20a/debug_gk20a.h
index cd2e09c3..c70b19d9 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.h
@@ -17,6 +17,8 @@
17#ifndef _DEBUG_GK20A_H_ 17#ifndef _DEBUG_GK20A_H_
18#define _DEBUG_GK20A_H_ 18#define _DEBUG_GK20A_H_
19 19
20struct platform_device;
21
20extern unsigned int gk20a_debug_trace_cmdbuf; 22extern unsigned int gk20a_debug_trace_cmdbuf;
21 23
22void gk20a_debug_dump(struct platform_device *pdev); 24void gk20a_debug_dump(struct platform_device *pdev);
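
The companion header change swaps an implicit include dependency for a forward declaration; since gk20a_debug_dump() only takes a pointer, the incomplete type suffices and linux/platform_device.h stays out of the header:

struct platform_device;  /* incomplete type: enough for pointer parameters */

void gk20a_debug_dump(struct platform_device *pdev);
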
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.h b/drivers/gpu/nvgpu/gk20a/fb_gk20a.h
index 34c21c9b..49dd5fd7 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * GK20A FB
3 *
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
5 * 3 *
6 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -13,8 +11,8 @@
13 * more details. 11 * more details.
14 */ 12 */
15 13
16#ifndef _NVHOST_GK20A_FB 14#ifndef FB_GK20A_H
17#define _NVHOST_GK20A_FB 15#define FB_GK20A_H
18struct gk20a; 16struct gk20a;
19 17
20void gk20a_init_fb(struct gpu_ops *gops); 18void gk20a_init_fb(struct gpu_ops *gops);
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index 4b98a4e7..32c66037 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -1,8 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/fence_gk20a.c
3 *
4 * GK20A Fences
5 *
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
7 * 3 *
8 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -31,6 +27,7 @@
31 27
32#ifdef CONFIG_TEGRA_GK20A 28#ifdef CONFIG_TEGRA_GK20A
33#include <linux/nvhost.h> 29#include <linux/nvhost.h>
30#include <linux/nvhost_ioctl.h>
34#endif 31#endif
35 32
36struct gk20a_fence_ops { 33struct gk20a_fence_ops {
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 0c8bc6f4..05377c3d 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/fifo_gk20a.c
3 *
4 * GK20A Graphics FIFO (gr host) 2 * GK20A Graphics FIFO (gr host)
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -27,6 +25,7 @@
27 25
28#include "gk20a.h" 26#include "gk20a.h"
29#include "debug_gk20a.h" 27#include "debug_gk20a.h"
28#include "semaphore_gk20a.h"
30#include "hw_fifo_gk20a.h" 29#include "hw_fifo_gk20a.h"
31#include "hw_pbdma_gk20a.h" 30#include "hw_pbdma_gk20a.h"
32#include "hw_ccsr_gk20a.h" 31#include "hw_ccsr_gk20a.h"
@@ -917,11 +916,11 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
917 * error condition. 916 * error condition.
918 * Don't overwrite error flag. */ 917 * Don't overwrite error flag. */
919 /* Fifo timeout debug spew is controlled by user */ 918 /* Fifo timeout debug spew is controlled by user */
920 if (err == NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT) 919 if (err == NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
921 verbose = ch->timeout_debug_dump; 920 verbose = ch->timeout_debug_dump;
922 } else { 921 } else {
923 gk20a_set_error_notifier(ch, 922 gk20a_set_error_notifier(ch,
924 NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT); 923 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
925 } 924 }
926 } 925 }
927 /* mark channel as faulted */ 926 /* mark channel as faulted */
@@ -1294,13 +1293,13 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
1294 mutex_lock(&tsg->ch_list_lock); 1293 mutex_lock(&tsg->ch_list_lock);
1295 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) { 1294 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
1296 gk20a_set_error_notifier(ch_tsg, 1295 gk20a_set_error_notifier(ch_tsg,
1297 NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR); 1296 NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1298 } 1297 }
1299 mutex_unlock(&tsg->ch_list_lock); 1298 mutex_unlock(&tsg->ch_list_lock);
1300 gk20a_fifo_recover_tsg(ch->g, ch->tsgid, verbose); 1299 gk20a_fifo_recover_tsg(ch->g, ch->tsgid, verbose);
1301 } else { 1300 } else {
1302 gk20a_set_error_notifier(ch, 1301 gk20a_set_error_notifier(ch,
1303 NVHOST_CHANNEL_RESETCHANNEL_VERIF_ERROR); 1302 NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1304 gk20a_fifo_recover_ch(ch->g, ch->hw_chid, verbose); 1303 gk20a_fifo_recover_ch(ch->g, ch->hw_chid, verbose);
1305 } 1304 }
1306 1305
@@ -1364,7 +1363,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1364 if (gk20a_channel_update_and_check_timeout(ch, 1363 if (gk20a_channel_update_and_check_timeout(ch,
1365 GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) { 1364 GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
1366 gk20a_set_error_notifier(ch, 1365 gk20a_set_error_notifier(ch,
1367 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); 1366 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1368 gk20a_err(dev_from_gk20a(g), 1367 gk20a_err(dev_from_gk20a(g),
1369 "fifo sched ctxsw timeout error:" 1368 "fifo sched ctxsw timeout error:"
1370 "engine = %u, ch = %d", engine_id, id); 1369 "engine = %u, ch = %d", engine_id, id);
@@ -1504,7 +1503,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1504 struct channel_gk20a *ch = &f->channel[id]; 1503 struct channel_gk20a *ch = &f->channel[id];
1505 1504
1506 gk20a_set_error_notifier(ch, 1505 gk20a_set_error_notifier(ch,
1507 NVHOST_CHANNEL_PBDMA_ERROR); 1506 NVGPU_CHANNEL_PBDMA_ERROR);
1508 gk20a_fifo_recover_ch(g, id, true); 1507 gk20a_fifo_recover_ch(g, id, true);
1509 } else if (fifo_pbdma_status_id_type_v(status) 1508 } else if (fifo_pbdma_status_id_type_v(status)
1510 == fifo_pbdma_status_id_type_tsgid_v()) { 1509 == fifo_pbdma_status_id_type_tsgid_v()) {
@@ -1514,7 +1513,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1514 mutex_lock(&tsg->ch_list_lock); 1513 mutex_lock(&tsg->ch_list_lock);
1515 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1514 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1516 gk20a_set_error_notifier(ch, 1515 gk20a_set_error_notifier(ch,
1517 NVHOST_CHANNEL_PBDMA_ERROR); 1516 NVGPU_CHANNEL_PBDMA_ERROR);
1518 } 1517 }
1519 mutex_unlock(&tsg->ch_list_lock); 1518 mutex_unlock(&tsg->ch_list_lock);
1520 gk20a_fifo_recover_tsg(g, id, true); 1519 gk20a_fifo_recover_tsg(g, id, true);
@@ -1644,7 +1643,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
1644 mutex_lock(&tsg->ch_list_lock); 1643 mutex_lock(&tsg->ch_list_lock);
1645 list_for_each_entry(ch, &tsg->ch_list, ch_entry) { 1644 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1646 gk20a_set_error_notifier(ch, 1645 gk20a_set_error_notifier(ch,
1647 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); 1646 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1648 } 1647 }
1649 mutex_unlock(&tsg->ch_list_lock); 1648 mutex_unlock(&tsg->ch_list_lock);
1650 gk20a_fifo_recover_tsg(g, id, true); 1649 gk20a_fifo_recover_tsg(g, id, true);
@@ -1655,7 +1654,7 @@ static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
1655 "preempt channel %d timeout\n", id); 1654 "preempt channel %d timeout\n", id);
1656 1655
1657 gk20a_set_error_notifier(ch, 1656 gk20a_set_error_notifier(ch,
1658 NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT); 1657 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1659 gk20a_fifo_recover_ch(g, id, true); 1658 gk20a_fifo_recover_ch(g, id, true);
1660 } 1659 }
1661 } 1660 }
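
Every fault path in this file repeats one recovery shape: if the channel is part of a TSG, each member gets the error notifier under the ch_list lock and the TSG is recovered as a unit; otherwise the single channel is notified and recovered. Condensed, assuming the gk20a_is_channel_marked_as_tsg() helper used elsewhere in the fifo code:

if (gk20a_is_channel_marked_as_tsg(ch)) {
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
	struct channel_gk20a *ch_tsg;

	mutex_lock(&tsg->ch_list_lock);
	list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry)
		gk20a_set_error_notifier(ch_tsg, NVGPU_CHANNEL_PBDMA_ERROR);
	mutex_unlock(&tsg->ch_list_lock);
	gk20a_fifo_recover_tsg(g, ch->tsgid, true);
} else {
	gk20a_set_error_notifier(ch, NVGPU_CHANNEL_PBDMA_ERROR);
	gk20a_fifo_recover_ch(g, ch->hw_chid, true);
}
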
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index acae38aa..ae108875 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/gk20a.c
3 *
4 * GK20A Graphics 2 * GK20A Graphics
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -45,6 +43,10 @@
45#include <linux/sched.h> 43#include <linux/sched.h>
46#include <linux/input-cfboost.h> 44#include <linux/input-cfboost.h>
47 45
46#ifdef CONFIG_TEGRA_GK20A
47#include <linux/nvhost.h>
48#endif
49
48#include "gk20a.h" 50#include "gk20a.h"
49#include "debug_gk20a.h" 51#include "debug_gk20a.h"
50#include "ctrl_gk20a.h" 52#include "ctrl_gk20a.h"
@@ -57,7 +59,6 @@
57#include "gk20a_scale.h" 59#include "gk20a_scale.h"
58#include "dbg_gpu_gk20a.h" 60#include "dbg_gpu_gk20a.h"
59#include "hal.h" 61#include "hal.h"
60#include "nvhost_acm.h"
61#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION 62#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
62#include "vgpu/vgpu.h" 63#include "vgpu/vgpu.h"
63#endif 64#endif
@@ -850,11 +851,11 @@ static int gk20a_pm_prepare_poweroff(struct device *dev)
850 851
851static void gk20a_detect_chip(struct gk20a *g) 852static void gk20a_detect_chip(struct gk20a *g)
852{ 853{
853 struct nvhost_gpu_characteristics *gpu = &g->gpu_characteristics; 854 struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
854 855
855 u32 mc_boot_0_value = gk20a_readl(g, mc_boot_0_r()); 856 u32 mc_boot_0_value = gk20a_readl(g, mc_boot_0_r());
856 gpu->arch = mc_boot_0_architecture_v(mc_boot_0_value) << 857 gpu->arch = mc_boot_0_architecture_v(mc_boot_0_value) <<
857 NVHOST_GPU_ARCHITECTURE_SHIFT; 858 NVGPU_GPU_ARCHITECTURE_SHIFT;
858 gpu->impl = mc_boot_0_implementation_v(mc_boot_0_value); 859 gpu->impl = mc_boot_0_implementation_v(mc_boot_0_value);
859 gpu->rev = 860 gpu->rev =
860 (mc_boot_0_major_revision_v(mc_boot_0_value) << 4) | 861 (mc_boot_0_major_revision_v(mc_boot_0_value) << 4) |
@@ -1622,7 +1623,7 @@ static int __exit gk20a_remove(struct platform_device *dev)
1622 pm_runtime_put(&dev->dev); 1623 pm_runtime_put(&dev->dev);
1623 pm_runtime_disable(&dev->dev); 1624 pm_runtime_disable(&dev->dev);
1624#else 1625#else
1625 nvhost_module_disable_clk(&dev->dev); 1626 gk20a_pm_disable_clk(&dev->dev);
1626#endif 1627#endif
1627 1628
1628 return 0; 1629 return 0;
@@ -1894,7 +1895,7 @@ int gk20a_do_unidle(void)
1894 1895
1895int gk20a_init_gpu_characteristics(struct gk20a *g) 1896int gk20a_init_gpu_characteristics(struct gk20a *g)
1896{ 1897{
1897 struct nvhost_gpu_characteristics *gpu = &g->gpu_characteristics; 1898 struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
1898 1899
1899 gpu->L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g); 1900 gpu->L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g);
1900 gpu->on_board_video_memory_size = 0; /* integrated GPU */ 1901 gpu->on_board_video_memory_size = 0; /* integrated GPU */
@@ -1902,18 +1903,18 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
1902 gpu->num_gpc = g->gr.gpc_count; 1903 gpu->num_gpc = g->gr.gpc_count;
1903 gpu->num_tpc_per_gpc = g->gr.max_tpc_per_gpc_count; 1904 gpu->num_tpc_per_gpc = g->gr.max_tpc_per_gpc_count;
1904 1905
1905 gpu->bus_type = NVHOST_GPU_BUS_TYPE_AXI; /* always AXI for now */ 1906 gpu->bus_type = NVGPU_GPU_BUS_TYPE_AXI; /* always AXI for now */
1906 1907
1907 gpu->big_page_size = g->mm.big_page_size; 1908 gpu->big_page_size = g->mm.big_page_size;
1908 gpu->compression_page_size = g->mm.compression_page_size; 1909 gpu->compression_page_size = g->mm.compression_page_size;
1909 gpu->pde_coverage_bit_count = g->mm.pde_stride_shift; 1910 gpu->pde_coverage_bit_count = g->mm.pde_stride_shift;
1910 1911
1911 gpu->flags = NVHOST_GPU_FLAGS_SUPPORT_PARTIAL_MAPPINGS 1912 gpu->flags = NVGPU_GPU_FLAGS_SUPPORT_PARTIAL_MAPPINGS
1912 | NVHOST_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS; 1913 | NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS;
1913 1914
1914 if (IS_ENABLED(CONFIG_TEGRA_GK20A) && 1915 if (IS_ENABLED(CONFIG_TEGRA_GK20A) &&
1915 gk20a_platform_has_syncpoints(g->dev)) 1916 gk20a_platform_has_syncpoints(g->dev))
1916 gpu->flags |= NVHOST_GPU_FLAGS_HAS_SYNCPOINTS; 1917 gpu->flags |= NVGPU_GPU_FLAGS_HAS_SYNCPOINTS;
1917 1918
1918 gpu->reserved = 0; 1919 gpu->reserved = 0;
1919 1920
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index ae640277..5429a570 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/gk20a.h
3 *
4 * GK20A Graphics 2 * GK20A Graphics
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,12 +12,11 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 13 * more details.
16 * 14 *
17 * You should have received a copy of the GNU General Public License along with 15 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 17 */
21#ifndef _NVHOST_GK20A_H_ 18#ifndef GK20A_H
22#define _NVHOST_GK20A_H_ 19#define GK20A_H
23 20
24 21
25struct gk20a; 22struct gk20a;
@@ -133,9 +130,9 @@ struct gpu_ops {
133 u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index); 130 u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
134 void (*free_channel_ctx)(struct channel_gk20a *c); 131 void (*free_channel_ctx)(struct channel_gk20a *c);
135 int (*alloc_obj_ctx)(struct channel_gk20a *c, 132 int (*alloc_obj_ctx)(struct channel_gk20a *c,
136 struct nvhost_alloc_obj_ctx_args *args); 133 struct nvgpu_alloc_obj_ctx_args *args);
137 int (*free_obj_ctx)(struct channel_gk20a *c, 134 int (*free_obj_ctx)(struct channel_gk20a *c,
138 struct nvhost_free_obj_ctx_args *args); 135 struct nvgpu_free_obj_ctx_args *args);
139 int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr, 136 int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr,
140 struct channel_gk20a *c, u64 zcull_va, 137 struct channel_gk20a *c, u64 zcull_va,
141 u32 mode); 138 u32 mode);
@@ -405,7 +402,7 @@ struct gk20a {
405 402
406 spinlock_t mc_enable_lock; 403 spinlock_t mc_enable_lock;
407 404
408 struct nvhost_gpu_characteristics gpu_characteristics; 405 struct nvgpu_gpu_characteristics gpu_characteristics;
409 406
410 struct { 407 struct {
411 struct cdev cdev; 408 struct cdev cdev;
@@ -504,11 +501,11 @@ struct gk20a_cyclestate_buffer_elem {
504#ifdef CONFIG_DEBUG_FS 501#ifdef CONFIG_DEBUG_FS
505 /* debug info, default is compiled-in but effectively disabled (0 mask) */ 502 /* debug info, default is compiled-in but effectively disabled (0 mask) */
506 #define GK20A_DEBUG 503 #define GK20A_DEBUG
507 /*e.g.: echo 1 > /d/tegra_host/dbg_mask */ 504 /*e.g.: echo 1 > /d/gk20a.0/dbg_mask */
508 #define GK20A_DEFAULT_DBG_MASK 0 505 #define GK20A_DEFAULT_DBG_MASK 0
509#else 506#else
510 /* manually enable and turn on the mask */ 507 /* manually enable and turn on the mask */
511 /*#define NVHOST_DEBUG*/ 508 /*#define NVGPU_DEBUG*/
512 #define GK20A_DEFAULT_DBG_MASK (dbg_info) 509 #define GK20A_DEFAULT_DBG_MASK (dbg_info)
513#endif 510#endif
514 511
@@ -719,21 +716,21 @@ int __gk20a_do_unidle(struct platform_device *pdev);
719const struct firmware * 716const struct firmware *
720gk20a_request_firmware(struct gk20a *g, const char *fw_name); 717gk20a_request_firmware(struct gk20a *g, const char *fw_name);
721 718
722#define NVHOST_GPU_ARCHITECTURE_SHIFT 4 719#define NVGPU_GPU_ARCHITECTURE_SHIFT 4
723 720
724/* constructs unique and compact GPUID from nvhost_gpu_characteristics 721/* constructs unique and compact GPUID from nvgpu_gpu_characteristics
725 * arch/impl fields */ 722 * arch/impl fields */
726#define GK20A_GPUID(arch, impl) ((u32) ((arch) | (impl))) 723#define GK20A_GPUID(arch, impl) ((u32) ((arch) | (impl)))
727 724
728#define GK20A_GPUID_GK20A \ 725#define GK20A_GPUID_GK20A \
729 GK20A_GPUID(NVHOST_GPU_ARCH_GK100, NVHOST_GPU_IMPL_GK20A) 726 GK20A_GPUID(NVGPU_GPU_ARCH_GK100, NVGPU_GPU_IMPL_GK20A)
730 727
731#define GK20A_GPUID_GM20B \ 728#define GK20A_GPUID_GM20B \
732 GK20A_GPUID(NVHOST_GPU_ARCH_GM200, NVHOST_GPU_IMPL_GM20B) 729 GK20A_GPUID(NVGPU_GPU_ARCH_GM200, NVGPU_GPU_IMPL_GM20B)
733 730
734int gk20a_init_gpu_characteristics(struct gk20a *g); 731int gk20a_init_gpu_characteristics(struct gk20a *g);
735 732
736int gk20a_user_init(struct platform_device *dev); 733int gk20a_user_init(struct platform_device *dev);
737void gk20a_user_deinit(struct platform_device *dev); 734void gk20a_user_deinit(struct platform_device *dev);
738 735
739#endif /* _NVHOST_GK20A_H_ */ 736#endif /* GK20A_H */
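
GK20A_GPUID composes correctly because gk20a_detect_chip() stores arch already shifted left by NVGPU_GPU_ARCHITECTURE_SHIFT, so the arch and impl fields occupy disjoint bits and a plain OR suffices. A worked example, with constant values assumed from the conventional Kepler encoding (they live in the uapi header, not in these hunks):

/* Assumed: NVGPU_GPU_ARCH_GK100 == 0x0E0, NVGPU_GPU_IMPL_GK20A == 0x00A */
u32 gpuid = GK20A_GPUID(0x0E0, 0x00A);  /* == 0x0EA: arch nibble 0xE, impl 0xA */
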
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
index cd72ec9c..5621800e 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * gk20a allocator
3 *
4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
5 * 3 *
6 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -16,8 +14,8 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 15 */
18 16
19#ifndef __NVHOST_ALLOCATOR_H__ 17#ifndef GK20A_ALLOCATOR_H
20#define __NVHOST_ALLOCATOR_H__ 18#define GK20A_ALLOCATOR_H
21 19
22#include <linux/rbtree.h> 20#include <linux/rbtree.h>
23#include <linux/rwsem.h> 21#include <linux/rwsem.h>
@@ -112,4 +110,4 @@ do { \
112 110
113#endif /* ALLOCATOR_DEBUG */ 111#endif /* ALLOCATOR_DEBUG */
114 112
115#endif /*__NVHOST_ALLOCATOR_H__ */ 113#endif /* GK20A_ALLOCATOR_H */
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_scale.h b/drivers/gpu/nvgpu/gk20a/gk20a_scale.h
index e76b1662..561ecaed 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_scale.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_scale.h
@@ -19,7 +19,6 @@
19#ifndef GK20A_SCALE_H 19#ifndef GK20A_SCALE_H
20#define GK20A_SCALE_H 20#define GK20A_SCALE_H
21 21
22#include <linux/nvhost.h>
23#include <linux/devfreq.h> 22#include <linux/devfreq.h>
24 23
25struct platform_device; 24struct platform_device;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 90838c64..11bca5bb 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -51,6 +51,7 @@
51#include "gr_pri_gk20a.h" 51#include "gr_pri_gk20a.h"
52#include "regops_gk20a.h" 52#include "regops_gk20a.h"
53#include "dbg_gpu_gk20a.h" 53#include "dbg_gpu_gk20a.h"
54#include "semaphore_gk20a.h"
54 55
55#define BLK_SIZE (256) 56#define BLK_SIZE (256)
56 57
@@ -2174,8 +2175,8 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
2174 * In case bootloader is not supported, revert to the old way of 2175 * In case bootloader is not supported, revert to the old way of
2175 * loading gr ucode, without the faster bootstrap routine. 2176 * loading gr ucode, without the faster bootstrap routine.
2176 */ 2177 */
2177 if (g->gpu_characteristics.arch != NVHOST_GPU_ARCH_GK100 && 2178 if (g->gpu_characteristics.arch != NVGPU_GPU_ARCH_GK100 &&
2178 g->gpu_characteristics.arch != NVHOST_GPU_ARCH_GM200) { 2179 g->gpu_characteristics.arch != NVGPU_GPU_ARCH_GM200) {
2179 gr_gk20a_load_falcon_dmem(g); 2180 gr_gk20a_load_falcon_dmem(g);
2180 gr_gk20a_load_falcon_imem(g); 2181 gr_gk20a_load_falcon_imem(g);
2181 gr_gk20a_start_falcon_ucode(g); 2182 gr_gk20a_start_falcon_ucode(g);
@@ -2437,7 +2438,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2437 } 2438 }
2438 2439
2439 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2440 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
2440 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 2441 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2441 gk20a_mem_flag_none); 2442 gk20a_mem_flag_none);
2442 if (!gpu_va) 2443 if (!gpu_va)
2443 goto clean_up; 2444 goto clean_up;
@@ -2454,7 +2455,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2454 } 2455 }
2455 2456
2456 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2457 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
2457 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 2458 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2458 gk20a_mem_flag_none); 2459 gk20a_mem_flag_none);
2459 if (!gpu_va) 2460 if (!gpu_va)
2460 goto clean_up; 2461 goto clean_up;
@@ -2471,7 +2472,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
2471 } 2472 }
2472 2473
2473 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 2474 gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
2474 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 2475 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2475 gk20a_mem_flag_none); 2476 gk20a_mem_flag_none);
2476 if (!gpu_va) 2477 if (!gpu_va)
2477 goto clean_up; 2478 goto clean_up;
@@ -2574,7 +2575,7 @@ static int __gr_gk20a_alloc_gr_ctx(struct gk20a *g,
2574 goto err_free; 2575 goto err_free;
2575 2576
2576 gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size, 2577 gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size,
2577 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 2578 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
2578 gk20a_mem_flag_none); 2579 gk20a_mem_flag_none);
2579 if (!gr_ctx->gpu_va) 2580 if (!gr_ctx->gpu_va)
2580 goto err_free_sgt; 2581 goto err_free_sgt;
@@ -2780,7 +2781,7 @@ static bool gr_gk20a_is_valid_class(struct gk20a *g, u32 class_num)
2780} 2781}
2781 2782
2782int gk20a_alloc_obj_ctx(struct channel_gk20a *c, 2783int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
2783 struct nvhost_alloc_obj_ctx_args *args) 2784 struct nvgpu_alloc_obj_ctx_args *args)
2784{ 2785{
2785 struct gk20a *g = c->g; 2786 struct gk20a *g = c->g;
2786 struct fifo_gk20a *f = &g->fifo; 2787 struct fifo_gk20a *f = &g->fifo;
@@ -2943,7 +2944,7 @@ out:
2943} 2944}
2944 2945
2945int gk20a_free_obj_ctx(struct channel_gk20a *c, 2946int gk20a_free_obj_ctx(struct channel_gk20a *c,
2946 struct nvhost_free_obj_ctx_args *args) 2947 struct nvgpu_free_obj_ctx_args *args)
2947{ 2948{
2948 unsigned long timeout = gk20a_get_gr_idle_timeout(c->g); 2949 unsigned long timeout = gk20a_get_gr_idle_timeout(c->g);
2949 2950
@@ -4956,7 +4957,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
4956 struct channel_gk20a *ch = &f->channel[isr_data->chid]; 4957 struct channel_gk20a *ch = &f->channel[isr_data->chid];
4957 gk20a_dbg_fn(""); 4958 gk20a_dbg_fn("");
4958 gk20a_set_error_notifier(ch, 4959 gk20a_set_error_notifier(ch,
4959 NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT); 4960 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
4960 gk20a_err(dev_from_gk20a(g), 4961 gk20a_err(dev_from_gk20a(g),
4961 "gr semaphore timeout\n"); 4962 "gr semaphore timeout\n");
4962 return -EINVAL; 4963 return -EINVAL;
@@ -4969,7 +4970,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
4969 struct channel_gk20a *ch = &f->channel[isr_data->chid]; 4970 struct channel_gk20a *ch = &f->channel[isr_data->chid];
4970 gk20a_dbg_fn(""); 4971 gk20a_dbg_fn("");
4971 gk20a_set_error_notifier(ch, 4972 gk20a_set_error_notifier(ch,
4972 NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY); 4973 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
4973 /* This is an unrecoverable error, reset is needed */ 4974 /* This is an unrecoverable error, reset is needed */
4974 gk20a_err(dev_from_gk20a(g), 4975 gk20a_err(dev_from_gk20a(g),
4975 "gr semaphore timeout\n"); 4976 "gr semaphore timeout\n");
@@ -4997,7 +4998,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
4997 struct channel_gk20a *ch = &f->channel[isr_data->chid]; 4998 struct channel_gk20a *ch = &f->channel[isr_data->chid];
4998 gk20a_dbg_fn(""); 4999 gk20a_dbg_fn("");
4999 gk20a_set_error_notifier(ch, 5000 gk20a_set_error_notifier(ch,
5000 NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY); 5001 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5001 gk20a_err(dev_from_gk20a(g), 5002 gk20a_err(dev_from_gk20a(g),
5002 "invalid class 0x%08x, offset 0x%08x", 5003 "invalid class 0x%08x, offset 0x%08x",
5003 isr_data->class_num, isr_data->offset); 5004 isr_data->class_num, isr_data->offset);
@@ -5037,7 +5038,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
5037 gk20a_dbg_fn(""); 5038 gk20a_dbg_fn("");
5038 5039
5039 gk20a_set_error_notifier(ch, 5040 gk20a_set_error_notifier(ch,
5040 NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY); 5041 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5041 gk20a_err(dev_from_gk20a(g), 5042 gk20a_err(dev_from_gk20a(g),
5042 "class error 0x%08x, offset 0x%08x, unhandled intr 0x%08x for channel %u\n", 5043 "class error 0x%08x, offset 0x%08x, unhandled intr 0x%08x for channel %u\n",
5043 isr_data->class_num, isr_data->offset, 5044 isr_data->class_num, isr_data->offset,
@@ -5054,7 +5055,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
5054 gk20a_dbg_fn(""); 5055 gk20a_dbg_fn("");
5055 5056
5056 gk20a_set_error_notifier(ch, 5057 gk20a_set_error_notifier(ch,
5057 NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY); 5058 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5058 gk20a_err(dev_from_gk20a(g), 5059 gk20a_err(dev_from_gk20a(g),
5059 "firmware method 0x%08x, offset 0x%08x for channel %u\n", 5060 "firmware method 0x%08x, offset 0x%08x for channel %u\n",
5060 isr_data->class_num, isr_data->offset, 5061 isr_data->class_num, isr_data->offset,
@@ -5674,7 +5675,7 @@ int gk20a_gr_isr(struct gk20a *g)
5674 5675
5675 if (need_reset) 5676 if (need_reset)
5676 gk20a_set_error_notifier(ch, 5677 gk20a_set_error_notifier(ch,
5677 NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY); 5678 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
5678 } 5679 }
5679 5680
5680 gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f()); 5681 gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f());
@@ -6774,7 +6775,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
6774 6775
6775 6776
6776int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, 6777int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
6777 struct nvhost_dbg_gpu_reg_op *ctx_ops, u32 num_ops, 6778 struct nvgpu_dbg_gpu_reg_op *ctx_ops, u32 num_ops,
6778 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops) 6779 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops)
6779{ 6780{
6780 struct gk20a *g = ch->g; 6781 struct gk20a *g = ch->g;
@@ -6921,7 +6922,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
6921 "ctx op invalid offset: offset=0x%x", 6922 "ctx op invalid offset: offset=0x%x",
6922 ctx_ops[i].offset); 6923 ctx_ops[i].offset);
6923 ctx_ops[i].status = 6924 ctx_ops[i].status =
6924 NVHOST_DBG_GPU_REG_OP_STATUS_INVALID_OFFSET; 6925 NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_OFFSET;
6925 continue; 6926 continue;
6926 } 6927 }
6927 6928
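
Each global-context buffer in this file is mapped with the same three-step pattern: map the scatter-gather table into the channel VM, request GPU-cacheable PTEs via the renamed flag, and treat a zero VA as failure. In isolation:

gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
			gk20a_mem_flag_none);
if (!gpu_va)
	goto clean_up;  /* gk20a_gmmu_map() signals failure with 0, not an ERR_PTR */
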
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index f60afd58..e9bf4505 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -15,8 +15,8 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#ifndef __GR_GK20A_H__ 18#ifndef GR_GK20A_H
19#define __GR_GK20A_H__ 19#define GR_GK20A_H
20 20
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
@@ -330,13 +330,13 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
330 330
331int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr); 331int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr);
332 332
333struct nvhost_alloc_obj_ctx_args; 333struct nvgpu_alloc_obj_ctx_args;
334struct nvhost_free_obj_ctx_args; 334struct nvgpu_free_obj_ctx_args;
335 335
336int gk20a_alloc_obj_ctx(struct channel_gk20a *c, 336int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
337 struct nvhost_alloc_obj_ctx_args *args); 337 struct nvgpu_alloc_obj_ctx_args *args);
338int gk20a_free_obj_ctx(struct channel_gk20a *c, 338int gk20a_free_obj_ctx(struct channel_gk20a *c,
339 struct nvhost_free_obj_ctx_args *args); 339 struct nvgpu_free_obj_ctx_args *args);
340void gk20a_free_channel_ctx(struct channel_gk20a *c); 340void gk20a_free_channel_ctx(struct channel_gk20a *c);
341 341
342int gk20a_gr_isr(struct gk20a *g); 342int gk20a_gr_isr(struct gk20a *g);
@@ -384,9 +384,9 @@ bool gk20a_gr_sm_debugger_attached(struct gk20a *g);
384 384
385int gk20a_gr_suspend(struct gk20a *g); 385int gk20a_gr_suspend(struct gk20a *g);
386 386
387struct nvhost_dbg_gpu_reg_op; 387struct nvgpu_dbg_gpu_reg_op;
388int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, 388int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
389 struct nvhost_dbg_gpu_reg_op *ctx_ops, u32 num_ops, 389 struct nvgpu_dbg_gpu_reg_op *ctx_ops, u32 num_ops,
390 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops); 390 u32 num_ctx_wr_ops, u32 num_ctx_rd_ops);
391int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, 391int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
392 u32 addr, 392 u32 addr,
@@ -424,4 +424,4 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g);
424void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g); 424void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g);
425 425
426void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *c); 426void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *c);
427#endif /*__GR_GK20A_H__*/ 427#endif /* GR_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
index a82a1ee7..9e1a1cb8 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
@@ -15,8 +15,8 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#ifndef _NVHOST_GR_PRI_GK20A_H_ 18#ifndef GR_PRI_GK20A_H
19#define _NVHOST_GR_PRI_GK20A_H_ 19#define GR_PRI_GK20A_H
20 20
21/* 21/*
22 * These convenience macros are generally for use in the management/modification 22 * These convenience macros are generally for use in the management/modification
@@ -176,4 +176,4 @@ enum ctxsw_addr_type {
176#define PRI_BROADCAST_FLAGS_BE BIT(2) 176#define PRI_BROADCAST_FLAGS_BE BIT(2)
177#define PRI_BROADCAST_FLAGS_PPC BIT(3) 177#define PRI_BROADCAST_FLAGS_PPC BIT(3)
178 178
179#endif /*_NVHOST_GR_PRI_GK20A_H_ */ 179#endif /* GR_PRI_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.h b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.h
index 208811b2..df7dc9e4 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.h
@@ -13,8 +13,8 @@
13 * more details. 13 * more details.
14 */ 14 */
15 15
16#ifndef _NVHOST_GK20A_LTC 16#ifndef LTC_GK20A_H
17#define _NVHOST_GK20A_LTC 17#define LTC_GK20A_H
18struct gk20a; 18struct gk20a;
19 19
20void gk20a_init_ltc(struct gpu_ops *gops); 20void gk20a_init_ltc(struct gpu_ops *gops);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 7660c949..37813ad3 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/mm_gk20a.c
3 *
4 * GK20A memory management 2 * GK20A memory management
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,9 +12,8 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 13 * more details.
16 * 14 *
17 * You should have received a copy of the GNU General Public License along with 15 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 17 */
21 18
22#include <linux/delay.h> 19#include <linux/delay.h>
@@ -29,6 +26,7 @@
29#include <linux/tegra-soc.h> 26#include <linux/tegra-soc.h>
30#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
31#include <linux/dma-buf.h> 28#include <linux/dma-buf.h>
29#include <linux/nvhost_as_ioctl.h>
32 30
33#include "gk20a.h" 31#include "gk20a.h"
34#include "mm_gk20a.h" 32#include "mm_gk20a.h"
@@ -42,6 +40,7 @@
42#include "hw_ltc_gk20a.h" 40#include "hw_ltc_gk20a.h"
43 41
44#include "kind_gk20a.h" 42#include "kind_gk20a.h"
43#include "semaphore_gk20a.h"
45 44
46/* 45/*
47 * GPU mapping life cycle 46 * GPU mapping life cycle
@@ -819,7 +818,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
819 return; 818 return;
820 } 819 }
821 820
822 if (mapped_buffer->flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 821 if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
823 mutex_unlock(&vm->update_gmmu_lock); 822 mutex_unlock(&vm->update_gmmu_lock);
824 823
825 if (tegra_platform_is_silicon()) 824 if (tegra_platform_is_silicon())
@@ -1175,7 +1174,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1175 kind_v, 1174 kind_v,
1176 ctag_offset, 1175 ctag_offset,
1177 flags & 1176 flags &
1178 NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 1177 NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
1179 rw_flag); 1178 rw_flag);
1180 if (err) { 1179 if (err) {
1181 gk20a_err(d, "failed to update ptes on map"); 1180 gk20a_err(d, "failed to update ptes on map");
@@ -1256,7 +1255,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1256 if (mapped_buffer->flags != flags) 1255 if (mapped_buffer->flags != flags)
1257 return 0; 1256 return 0;
1258 1257
1259 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET && 1258 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET &&
1260 mapped_buffer->addr != offset_align) 1259 mapped_buffer->addr != offset_align)
1261 return 0; 1260 return 0;
1262 1261
@@ -1303,7 +1302,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1303u64 gk20a_vm_map(struct vm_gk20a *vm, 1302u64 gk20a_vm_map(struct vm_gk20a *vm,
1304 struct dma_buf *dmabuf, 1303 struct dma_buf *dmabuf,
1305 u64 offset_align, 1304 u64 offset_align,
1306 u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/, 1305 u32 flags /*NVGPU_AS_MAP_BUFFER_FLAGS_*/,
1307 int kind, 1306 int kind,
1308 struct sg_table **sgt, 1307 struct sg_table **sgt,
1309 bool user_mapped, 1308 bool user_mapped,
@@ -1364,7 +1363,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1364 1363
1365 /* If FIX_OFFSET is set, pgsz is determined. Otherwise, select 1364 /* If FIX_OFFSET is set, pgsz is determined. Otherwise, select
1366 * page size according to memory alignment */ 1365 * page size according to memory alignment */
1367 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 1366 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
1368 bfr.pgsz_idx = NV_GMMU_VA_IS_UPPER(offset_align) ? 1367 bfr.pgsz_idx = NV_GMMU_VA_IS_UPPER(offset_align) ?
1369 gmmu_page_size_big : gmmu_page_size_small; 1368 gmmu_page_size_big : gmmu_page_size_small;
1370 } else { 1369 } else {
@@ -1390,7 +1389,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1390 1389
1391 /* Check if we should use a fixed offset for mapping this buffer */ 1390 /* Check if we should use a fixed offset for mapping this buffer */
1392 1391
1393 if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { 1392 if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
1394 err = validate_fixed_buffer(vm, &bfr, 1393 err = validate_fixed_buffer(vm, &bfr,
1395 offset_align, mapping_size); 1394 offset_align, mapping_size);
1396 if (err) 1395 if (err)
@@ -1996,7 +1995,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
1996 for (i = 0; i < num_pages; i++) { 1995 for (i = 0; i < num_pages; i++) {
1997 u64 page_vaddr = g->ops.mm.gmmu_map(vm, vaddr, 1996 u64 page_vaddr = g->ops.mm.gmmu_map(vm, vaddr,
1998 vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0, 1997 vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0,
1999 NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET, 1998 NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
2000 gk20a_mem_flag_none, false); 1999 gk20a_mem_flag_none, false);
2001 2000
2002 if (!page_vaddr) { 2001 if (!page_vaddr) {
@@ -2322,7 +2321,7 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share)
2322 2321
2323 2322
2324int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, 2323int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2325 struct nvhost_as_alloc_space_args *args) 2324 struct nvgpu_as_alloc_space_args *args)
2326 2325
2327{ int err = -ENOMEM; 2326{ int err = -ENOMEM;
2328 int pgsz_idx; 2327 int pgsz_idx;
@@ -2356,7 +2355,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2356 goto clean_up; 2355 goto clean_up;
2357 } 2356 }
2358 2357
2359 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_SPARSE && 2358 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE &&
2360 pgsz_idx != gmmu_page_size_big) { 2359 pgsz_idx != gmmu_page_size_big) {
2361 err = -ENOSYS; 2360 err = -ENOSYS;
2362 kfree(va_node); 2361 kfree(va_node);
@@ -2364,7 +2363,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2364 } 2363 }
2365 2364
2366 start_page_nr = 0; 2365 start_page_nr = 0;
2367 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) 2366 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
2368 start_page_nr = (u32)(args->o_a.offset >> 2367 start_page_nr = (u32)(args->o_a.offset >>
2369 gmmu_page_shifts[pgsz_idx]); 2368 gmmu_page_shifts[pgsz_idx]);
2370 2369
@@ -2386,7 +2385,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2386 mutex_lock(&vm->update_gmmu_lock); 2385 mutex_lock(&vm->update_gmmu_lock);
2387 2386
2388 /* mark that we need to use sparse mappings here */ 2387 /* mark that we need to use sparse mappings here */
2389 if (args->flags & NVHOST_AS_ALLOC_SPACE_FLAGS_SPARSE) { 2388 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) {
2390 err = g->ops.mm.set_sparse(vm, vaddr_start, args->pages, 2389 err = g->ops.mm.set_sparse(vm, vaddr_start, args->pages,
2391 pgsz_idx, true); 2390 pgsz_idx, true);
2392 if (err) { 2391 if (err) {
@@ -2409,7 +2408,7 @@ clean_up:
2409} 2408}
2410 2409
2411int gk20a_vm_free_space(struct gk20a_as_share *as_share, 2410int gk20a_vm_free_space(struct gk20a_as_share *as_share,
2412 struct nvhost_as_free_space_args *args) 2411 struct nvgpu_as_free_space_args *args)
2413{ 2412{
2414 int err = -ENOMEM; 2413 int err = -ENOMEM;
2415 int pgsz_idx; 2414 int pgsz_idx;
@@ -2580,7 +2579,7 @@ static int gk20a_dmabuf_get_kind(struct dma_buf *dmabuf)
2580int gk20a_vm_map_buffer(struct gk20a_as_share *as_share, 2579int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
2581 int dmabuf_fd, 2580 int dmabuf_fd,
2582 u64 *offset_align, 2581 u64 *offset_align,
2583 u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/ 2582 u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
2584 int kind, 2583 int kind,
2585 u64 buffer_offset, 2584 u64 buffer_offset,
2586 u64 mapping_size) 2585 u64 mapping_size)
@@ -3147,7 +3146,7 @@ bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g)
3147 3146
3148void gk20a_init_mm(struct gpu_ops *gops) 3147void gk20a_init_mm(struct gpu_ops *gops)
3149{ 3148{
3150 /* remember to remove NVHOST_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS in 3149 /* remember to remove NVGPU_GPU_FLAGS_SUPPORT_SPARSE_ALLOCS in
3151 * characteristics flags if sparse support is removed */ 3150 * characteristics flags if sparse support is removed */
3152 gops->mm.set_sparse = gk20a_vm_put_sparse; 3151 gops->mm.set_sparse = gk20a_vm_put_sparse;
3153 gops->mm.put_empty = gk20a_vm_put_empty; 3152 gops->mm.put_empty = gk20a_vm_put_empty;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 89a2108b..6c46e113 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/mm_gk20a.h
3 *
4 * GK20A memory management 2 * GK20A memory management
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,12 +12,11 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 13 * more details.
16 * 14 *
17 * You should have received a copy of the GNU General Public License along with 15 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 17 */
21#ifndef __MM_GK20A_H__ 18#ifndef MM_GK20A_H
22#define __MM_GK20A_H__ 19#define MM_GK20A_H
23 20
24#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
25#include <linux/dma-attrs.h> 22#include <linux/dma-attrs.h>
@@ -210,14 +207,6 @@ struct page_table_gk20a {
210 size_t size; 207 size_t size;
211}; 208};
212 209
213#ifndef _NVHOST_MEM_MGR_H
214enum gk20a_mem_rw_flag {
215 gk20a_mem_flag_none = 0,
216 gk20a_mem_flag_read_only = 1,
217 gk20a_mem_flag_write_only = 2,
218};
219#endif
220
221enum gmmu_pgsz_gk20a { 210enum gmmu_pgsz_gk20a {
222 gmmu_page_size_small = 0, 211 gmmu_page_size_small = 0,
223 gmmu_page_size_big = 1, 212 gmmu_page_size_big = 1,
@@ -476,7 +465,7 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
476u64 gk20a_vm_map(struct vm_gk20a *vm, 465u64 gk20a_vm_map(struct vm_gk20a *vm,
477 struct dma_buf *dmabuf, 466 struct dma_buf *dmabuf,
478 u64 offset_align, 467 u64 offset_align,
479 u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/, 468 u32 flags /*NVGPU_AS_MAP_BUFFER_FLAGS_*/,
480 int kind, 469 int kind,
481 struct sg_table **sgt, 470 struct sg_table **sgt,
482 bool user_mapped, 471 bool user_mapped,
@@ -521,20 +510,20 @@ int gk20a_vm_free_va(struct vm_gk20a *vm,
521 enum gmmu_pgsz_gk20a pgsz_idx); 510 enum gmmu_pgsz_gk20a pgsz_idx);
522 511
523/* vm-as interface */ 512/* vm-as interface */
524struct nvhost_as_alloc_space_args; 513struct nvgpu_as_alloc_space_args;
525struct nvhost_as_free_space_args; 514struct nvgpu_as_free_space_args;
526int gk20a_vm_alloc_share(struct gk20a_as_share *as_share); 515int gk20a_vm_alloc_share(struct gk20a_as_share *as_share);
527int gk20a_vm_release_share(struct gk20a_as_share *as_share); 516int gk20a_vm_release_share(struct gk20a_as_share *as_share);
528int gk20a_vm_alloc_space(struct gk20a_as_share *as_share, 517int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
529 struct nvhost_as_alloc_space_args *args); 518 struct nvgpu_as_alloc_space_args *args);
530int gk20a_vm_free_space(struct gk20a_as_share *as_share, 519int gk20a_vm_free_space(struct gk20a_as_share *as_share,
531 struct nvhost_as_free_space_args *args); 520 struct nvgpu_as_free_space_args *args);
532int gk20a_vm_bind_channel(struct gk20a_as_share *as_share, 521int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
533 struct channel_gk20a *ch); 522 struct channel_gk20a *ch);
534int gk20a_vm_map_buffer(struct gk20a_as_share *as_share, 523int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
535 int dmabuf_fd, 524 int dmabuf_fd,
536 u64 *offset_align, 525 u64 *offset_align,
537 u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/ 526 u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
538 int kind, 527 int kind,
539 u64 buffer_offset, 528 u64 buffer_offset,
540 u64 mapping_size); 529 u64 mapping_size);
@@ -570,4 +559,4 @@ void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
570 559
571struct gpu_ops; 560struct gpu_ops;
572void gk20a_init_mm(struct gpu_ops *gops); 561void gk20a_init_mm(struct gpu_ops *gops);
573#endif /*_MM_GK20A_H_ */ 562#endif /* MM_GK20A_H */
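
[Editor's note: a sketch of driving the renamed vm-as interface, illustration only. .flags, .pages and o_a.offset are all consumed by gk20a_vm_alloc_space() in mm_gk20a.c above; the page_size field name is an assumption carried over from the AS ioctl args.]

        static int alloc_fixed_sparse_range(struct gk20a_as_share *as_share,
                                            u64 gpu_va, u32 pages, u32 page_size)
        {
                struct nvgpu_as_alloc_space_args args = {
                        .pages     = pages,
                        .page_size = page_size,   /* assumed field name */
                        .flags     = NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET |
                                     NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE,
                };

                args.o_a.offset = gpu_va;   /* the fixed VA being requested */
                return gk20a_vm_alloc_space(as_share, &args);
        }

[Note that, per the check in mm_gk20a.c above, SPARSE allocations fail with -ENOSYS unless the chosen page size maps to gmmu_page_size_big.]
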
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index bbbbccb4..d8a30d9a 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/nvhost.h>
19#include <linux/debugfs.h> 20#include <linux/debugfs.h>
20#include <linux/tegra-powergate.h> 21#include <linux/tegra-powergate.h>
21#include <linux/platform_data/tegra_edp.h> 22#include <linux/platform_data/tegra_edp.h>
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index f0ce3c9a..eb79fe17 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/pmu_gk20a.c
3 *
4 * GK20A PMU (aka. gPMU outside gk20a context) 2 * GK20A PMU (aka. gPMU outside gk20a context)
5 * 3 *
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
@@ -14,9 +12,8 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 13 * more details.
16 * 14 *
17 * You should have received a copy of the GNU General Public License along with 15 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 17 */
21 18
22#include <linux/delay.h> /* for mdelay */ 19#include <linux/delay.h> /* for mdelay */
@@ -29,6 +26,7 @@
29 26
30#include "gk20a.h" 27#include "gk20a.h"
31#include "gr_gk20a.h" 28#include "gr_gk20a.h"
29#include "semaphore_gk20a.h"
32#include "hw_mc_gk20a.h" 30#include "hw_mc_gk20a.h"
33#include "hw_pwr_gk20a.h" 31#include "hw_pwr_gk20a.h"
34#include "hw_top_gk20a.h" 32#include "hw_top_gk20a.h"
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 628b12ef..0e1081b9 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Tegra GK20A GPU Debugger Driver Register Ops 2 * Tegra GK20A GPU Debugger Driver Register Ops
4 * 3 *
5 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
@@ -372,12 +371,12 @@ static const u32 gk20a_qctl_whitelist_ranges_count =
372 371
373static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, 372static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
374 u32 *ctx_rd_count, u32 *ctx_wr_count, 373 u32 *ctx_rd_count, u32 *ctx_wr_count,
375 struct nvhost_dbg_gpu_reg_op *ops, 374 struct nvgpu_dbg_gpu_reg_op *ops,
376 u32 op_count); 375 u32 op_count);
377 376
378 377
379int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, 378int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
380 struct nvhost_dbg_gpu_reg_op *ops, 379 struct nvgpu_dbg_gpu_reg_op *ops,
381 u64 num_ops) 380 u64 num_ops)
382{ 381{
383 int err = 0, i; 382 int err = 0, i;
@@ -502,7 +501,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
502 501
503 502
504static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s, 503static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
505 struct nvhost_dbg_gpu_reg_op *op) 504 struct nvgpu_dbg_gpu_reg_op *op)
506{ 505{
507 int err = 0; 506 int err = 0;
508 507
@@ -531,7 +530,7 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
531 case REGOP(TYPE_GR_CTX_QUAD): 530 case REGOP(TYPE_GR_CTX_QUAD):
532 break; 531 break;
533 /* 532 /*
534 case NVHOST_DBG_GPU_REG_OP_TYPE_FB: 533 case NVGPU_DBG_GPU_REG_OP_TYPE_FB:
535 */ 534 */
536 default: 535 default:
537 op->status |= REGOP(STATUS_INVALID_TYPE); 536 op->status |= REGOP(STATUS_INVALID_TYPE);
@@ -544,7 +543,7 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
544} 543}
545 544
546static bool check_whitelists(struct dbg_session_gk20a *dbg_s, 545static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
547 struct nvhost_dbg_gpu_reg_op *op, u32 offset) 546 struct nvgpu_dbg_gpu_reg_op *op, u32 offset)
548{ 547{
549 struct gk20a *g = dbg_s->g; 548 struct gk20a *g = dbg_s->g;
550 bool valid = false; 549 bool valid = false;
@@ -606,7 +605,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
606 605
607/* note: the op here has already been through validate_reg_op_info */ 606/* note: the op here has already been through validate_reg_op_info */
608static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s, 607static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
609 struct nvhost_dbg_gpu_reg_op *op) 608 struct nvgpu_dbg_gpu_reg_op *op)
610{ 609{
611 int err; 610 int err;
612 u32 buf_offset_lo, buf_offset_addr, num_offsets, offset; 611 u32 buf_offset_lo, buf_offset_addr, num_offsets, offset;
@@ -656,7 +655,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
656 655
657static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, 656static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
658 u32 *ctx_rd_count, u32 *ctx_wr_count, 657 u32 *ctx_rd_count, u32 *ctx_wr_count,
659 struct nvhost_dbg_gpu_reg_op *ops, 658 struct nvgpu_dbg_gpu_reg_op *ops,
660 u32 op_count) 659 u32 op_count)
661{ 660{
662 u32 i; 661 u32 i;
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.h b/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
index 808e8bbe..0c244f58 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.h
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Tegra GK20A GPU Debugger Driver Register Ops 2 * Tegra GK20A GPU Debugger Driver Register Ops
4 * 3 *
5 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
@@ -16,8 +15,8 @@
16 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 17 */
19#ifndef __REGOPS_GK20A_H_ 18#ifndef REGOPS_GK20A_H
20#define __REGOPS_GK20A_H_ 19#define REGOPS_GK20A_H
21 20
22#include <linux/nvhost_dbg_gpu_ioctl.h> 21#include <linux/nvhost_dbg_gpu_ioctl.h>
23 22
@@ -27,11 +26,11 @@ struct regop_offset_range {
27}; 26};
28 27
29int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, 28int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
30 struct nvhost_dbg_gpu_reg_op *ops, 29 struct nvgpu_dbg_gpu_reg_op *ops,
31 u64 num_ops); 30 u64 num_ops);
32 31
33/* turn seriously unwieldy names -> something shorter */ 32/* turn seriously unwieldy names -> something shorter */
34#define REGOP(x) NVHOST_DBG_GPU_REG_OP_##x 33#define REGOP(x) NVGPU_DBG_GPU_REG_OP_##x
35 34
36static inline bool reg_op_is_gr_ctx(u8 type) 35static inline bool reg_op_is_gr_ctx(u8 type)
37{ 36{
@@ -51,4 +50,4 @@ static inline bool reg_op_is_read(u8 op)
51bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset); 50bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset);
52 51
53void gk20a_init_regops(struct gpu_ops *gops); 52void gk20a_init_regops(struct gpu_ops *gops);
54#endif /* __REGOPS_GK20A_H_ */ 53#endif /* REGOPS_GK20A_H */
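
[Editor's note: the REGOP() shortener now pastes onto the renamed prefix, so every call site in regops_gk20a.c picked up the nvhost -> nvgpu rename for free. For example, mirroring the status update in validate_reg_op_info() above:]

        #define REGOP(x) NVGPU_DBG_GPU_REG_OP_##x

        /* so, after this patch: */
        op->status |= REGOP(STATUS_INVALID_TYPE);
        /* expands to:
         * op->status |= NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_TYPE; */
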
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
index 214db398..6ffe1fd2 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
@@ -1,8 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/semaphore_gk20a.h
3 *
4 * GK20A Semaphores
5 *
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
7 * 3 *
8 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
@@ -15,8 +11,8 @@
15 * more details. 11 * more details.
16 */ 12 */
17 13
18#ifndef _GK20A_SEMAPHORE_H_ 14#ifndef SEMAPHORE_GK20A_H
19#define _GK20A_SEMAPHORE_H_ 15#define SEMAPHORE_GK20A_H
20 16
21#include <linux/kref.h> 17#include <linux/kref.h>
22#include "gk20a_allocator.h" 18#include "gk20a_allocator.h"
@@ -35,6 +31,12 @@ struct gk20a_semaphore_pool {
35 struct gk20a_allocator alloc; 31 struct gk20a_allocator alloc;
36}; 32};
37 33
34enum gk20a_mem_rw_flag {
35 gk20a_mem_flag_none = 0,
36 gk20a_mem_flag_read_only = 1,
37 gk20a_mem_flag_write_only = 2,
38};
39
38/* A semaphore pool can be mapped to multiple GPU address spaces. */ 40/* A semaphore pool can be mapped to multiple GPU address spaces. */
39struct gk20a_semaphore_pool_map { 41struct gk20a_semaphore_pool_map {
40 u64 gpu_va; 42 u64 gpu_va;
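
[Editor's note: enum gk20a_mem_rw_flag moves here from mm_gk20a.h, where it sat behind the now-removed #ifndef _NVHOST_MEM_MGR_H guard. It feeds the rw_flag argument of the GMMU map path; a trivial illustration:]

        /* Illustration only: gk20a_vm_put_empty() in mm_gk20a.c, for
         * instance, passes gk20a_mem_flag_none for its zero-page mappings. */
        static enum gk20a_mem_rw_flag map_prot(bool read_only)
        {
                return read_only ? gk20a_mem_flag_read_only : gk20a_mem_flag_none;
        }
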
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index da9a0f5e..abbf6aa8 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/sync_gk20a.h
3 *
4 * GK20A Sync Framework Integration 2 * GK20A Sync Framework Integration
5 * 3 *
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
diff --git a/drivers/gpu/nvgpu/gk20a/therm_gk20a.h b/drivers/gpu/nvgpu/gk20a/therm_gk20a.h
index 3f67ee12..e670ec0e 100644
--- a/drivers/gpu/nvgpu/gk20a/therm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/therm_gk20a.h
@@ -1,9 +1,5 @@
1/* 1/*
2 * drivers/video/tegra/host/gk20a/therm_gk20a.h 2 * Copyright (c) 2011 - 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * GK20A Therm
5 *
6 * Copyright (c) 2011 - 2012, NVIDIA CORPORATION. All rights reserved.
7 * 3 *
8 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
@@ -14,12 +10,11 @@
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 11 * more details.
16 * 12 *
17 * You should have received a copy of the GNU General Public License along with 13 * You should have received a copy of the GNU General Public License
18 * this program; if not, write to the Free Software Foundation, Inc., 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 15 */
21#ifndef _NVHOST_THERM_GK20A_H_ 16#ifndef THERM_GK20A_H
22#define _NVHOST_THERM_GK20A_H_ 17#define THERM_GK20A_H
23 18
24/* priority for EXT_THERM_0 event set to highest */ 19/* priority for EXT_THERM_0 event set to highest */
25#define NV_THERM_EVT_EXT_THERM_0_INIT 0x3000100 20#define NV_THERM_EVT_EXT_THERM_0_INIT 0x3000100
@@ -30,4 +25,4 @@
30 25
31int gk20a_init_therm_support(struct gk20a *g); 26int gk20a_init_therm_support(struct gk20a *g);
32 27
33#endif /* _NVHOST_THERM_GK20A_H_ */ 28#endif /* THERM_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 7e70d5a4..73c690fd 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * GM20B ACR
3 *
4 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
5* 3 *
6 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation. 6 * version 2, as published by the Free Software Foundation.
@@ -24,6 +22,7 @@
24 22
25#include "gk20a/gk20a.h" 23#include "gk20a/gk20a.h"
26#include "gk20a/pmu_gk20a.h" 24#include "gk20a/pmu_gk20a.h"
25#include "gk20a/semaphore_gk20a.h"
27#include "hw_pwr_gm20b.h" 26#include "hw_pwr_gm20b.h"
28#include "mc_carveout_reg.h" 27#include "mc_carveout_reg.h"
29 28
diff --git a/drivers/gpu/nvgpu/gm20b/regops_gm20b.c b/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
index e0f1fc0b..6c77f8d2 100644
--- a/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Tegra GK20A GPU Debugger Driver Register Ops 2 * Tegra GK20A GPU Debugger Driver Register Ops
4 * 3 *
5 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.