author     Deepak Nibade <dnibade@nvidia.com>       2014-11-04 08:14:28 -0500
committer  Dan Willemsen <dwillemsen@nvidia.com>    2015-03-18 15:12:01 -0400
commit     b3f575074b66e8af1a9943874f9782b793fa7edc (patch)
tree       c4aaa1defc512cf5a896edc25445f169de184ece
parent     797e4dd319bd2b9e13ce0e44a3bbbb75e4820330 (diff)
gpu: nvgpu: fix sparse warnings
Fix the following sparse warnings:

warning: Using plain integer as NULL pointer
warning: symbol <variable/function> was not declared. Should it be static?
warning: Initializer entry defined twice

Also, remove dead functions.

Bug 1573254

Change-Id: I29d71ecc01c841233cf6b26c9088ca8874773469
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/593363
Reviewed-by: Amit Sharma (SW-TEGRA) <amisharma@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
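The three warning classes and the shape of the fixes applied throughout this patch can be illustrated with a minimal, self-contained C sketch. The identifiers below are hypothetical and not taken from the driver; running the file through sparse with the pre-fix variants shown in the comments reproduces each warning.

#include <stddef.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

/* 1. "Using plain integer as NULL pointer": initialise and return pointers
 *    with NULL, not 0.
 */
static struct ops *lookup_ops(int found)
{
	struct ops *p = NULL;           /* was: struct ops *p = 0; */

	if (!found)
		return NULL;            /* was: return 0; */
	return p;
}

/* 2. "symbol 'helper' was not declared. Should it be static?": a function
 *    used only within this file gets internal linkage.
 */
static int helper(void)                 /* was: int helper(void) with no prior declaration */
{
	return 42;
}

/* 3. "Initializer entry defined twice": each designated field is set once. */
static int do_open(void)  { return 0; }
static int do_close(void) { return 0; }

static const struct ops file_ops = {
	.open  = do_open,
	.close = do_close,              /* was: .close listed twice in the initializer */
};

int main(void)
{
	(void)lookup_ops(1);
	(void)file_ops;
	return helper() == 42 ? 0 : 1;
}

The same patterns recur in the patch itself: pointers are reset with NULL rather than 0 (e.g. *out = NULL in as_gk20a.c), file-local helpers such as gk20a_cde_stop() are marked static, and the duplicate .compat_ioctl initializer in gk20a.c is dropped.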
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.c                  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/cde_gk20a.c                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c            40
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c       14
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c             8
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c               7
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c                2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c                    21
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                     1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c     10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c              2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c                 24
-rw-r--r--  drivers/gpu/nvgpu/gk20a/hal_gk20a.c                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_gk20a.c                 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c                 24
-rw-r--r--  drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c     10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c                10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c             29
-rw-r--r--  drivers/gpu/nvgpu/gm20b/acr_gm20b.c                16
-rw-r--r--  drivers/gpu/nvgpu/gm20b/fb_gm20b.c                  2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c     78
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c              2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c                  2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/hal_gm20b.c                 2
-rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c                 6
-rw-r--r--  drivers/gpu/nvgpu/gm20b/mm_gm20b.c                  7
-rw-r--r--  drivers/gpu/nvgpu/gm20b/pmu_gm20b.c                 8
-rw-r--r--  drivers/gpu/nvgpu/gm20b/regops_gm20b.c             27
28 files changed, 155 insertions(+), 205 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index dd5a5cf3..0f587a30 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -46,7 +46,7 @@ int gk20a_as_alloc_share(struct gk20a_as *as,
46 46
47 gk20a_dbg_fn(""); 47 gk20a_dbg_fn("");
48 48
49 *out = 0; 49 *out = NULL;
50 as_share = kzalloc(sizeof(*as_share), GFP_KERNEL); 50 as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
51 if (!as_share) 51 if (!as_share)
52 return -ENOMEM; 52 return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 47ea8052..053dc9d4 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -122,7 +122,7 @@ static void gk20a_cde_deallocate_contexts(struct gk20a *g)
122 } 122 }
123} 123}
124 124
125void gk20a_cde_stop(struct gk20a *g) 125static void gk20a_cde_stop(struct gk20a *g)
126{ 126{
127 struct gk20a_cde_app *cde_app = &g->cde_app; 127 struct gk20a_cde_app *cde_app = &g->cde_app;
128 128
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index f48ef35f..7f179d14 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -603,9 +603,9 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
603 if (ch->error_notifier_ref) { 603 if (ch->error_notifier_ref) {
604 dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va); 604 dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
605 dma_buf_put(ch->error_notifier_ref); 605 dma_buf_put(ch->error_notifier_ref);
606 ch->error_notifier_ref = 0; 606 ch->error_notifier_ref = NULL;
607 ch->error_notifier = 0; 607 ch->error_notifier = NULL;
608 ch->error_notifier_va = 0; 608 ch->error_notifier_va = NULL;
609 } 609 }
610} 610}
611 611
@@ -785,7 +785,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
785 if (ch == NULL) { 785 if (ch == NULL) {
786 /* TBD: we want to make this virtualizable */ 786 /* TBD: we want to make this virtualizable */
787 gk20a_err(dev_from_gk20a(g), "out of hw chids"); 787 gk20a_err(dev_from_gk20a(g), "out of hw chids");
788 return 0; 788 return NULL;
789 } 789 }
790 790
791 ch->g = g; 791 ch->g = g;
@@ -795,7 +795,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
795 gk20a_err(dev_from_gk20a(g), 795 gk20a_err(dev_from_gk20a(g),
796 "failed to open gk20a channel, out of inst mem"); 796 "failed to open gk20a channel, out of inst mem");
797 797
798 return 0; 798 return NULL;
799 } 799 }
800 g->ops.fifo.bind_channel(ch); 800 g->ops.fifo.bind_channel(ch);
801 ch->pid = current->pid; 801 ch->pid = current->pid;
@@ -1265,18 +1265,6 @@ clean_up:
1265 return err; 1265 return err;
1266} 1266}
1267 1267
1268static inline int wfi_cmd_size(void)
1269{
1270 return 2;
1271}
1272void add_wfi_cmd(struct priv_cmd_entry *cmd, int *i)
1273{
1274 /* wfi */
1275 cmd->ptr[(*i)++] = 0x2001001E;
1276 /* handle, ignored */
1277 cmd->ptr[(*i)++] = 0x00000000;
1278}
1279
1280static inline bool check_gp_put(struct gk20a *g, 1268static inline bool check_gp_put(struct gk20a *g,
1281 struct channel_gk20a *c) 1269 struct channel_gk20a *c)
1282{ 1270{
@@ -1529,18 +1517,6 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
1529 schedule_work(&c->update_fn_work); 1517 schedule_work(&c->update_fn_work);
1530} 1518}
1531 1519
1532void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
1533{
1534 /* syncpoint_a */
1535 ptr[0] = 0x2001001C;
1536 /* payload */
1537 ptr[1] = thresh;
1538 /* syncpoint_b */
1539 ptr[2] = 0x2001001D;
1540 /* syncpt_id, switch_en, wait */
1541 ptr[3] = (id << 8) | 0x10;
1542}
1543
1544int gk20a_submit_channel_gpfifo(struct channel_gk20a *c, 1520int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
1545 struct nvgpu_gpfifo *gpfifo, 1521 struct nvgpu_gpfifo *gpfifo,
1546 u32 num_entries, 1522 u32 num_entries,
@@ -1760,11 +1736,6 @@ clean_up:
1760 return err; 1736 return err;
1761} 1737}
1762 1738
1763void gk20a_remove_channel_support(struct channel_gk20a *c)
1764{
1765
1766}
1767
1768int gk20a_init_channel_support(struct gk20a *g, u32 chid) 1739int gk20a_init_channel_support(struct gk20a *g, u32 chid)
1769{ 1740{
1770 struct channel_gk20a *c = g->fifo.channel+chid; 1741 struct channel_gk20a *c = g->fifo.channel+chid;
@@ -1772,7 +1743,6 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
1772 c->in_use = false; 1743 c->in_use = false;
1773 c->hw_chid = chid; 1744 c->hw_chid = chid;
1774 c->bound = false; 1745 c->bound = false;
1775 c->remove_support = gk20a_remove_channel_support;
1776 mutex_init(&c->jobs_lock); 1746 mutex_init(&c->jobs_lock);
1777 mutex_init(&c->submit_lock); 1747 mutex_init(&c->submit_lock);
1778 INIT_LIST_HEAD(&c->jobs); 1748 INIT_LIST_HEAD(&c->jobs);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index bddcf86e..746a2de3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -53,8 +53,8 @@ static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
53 ptr[3] = (id << 8) | 0x10; 53 ptr[3] = (id << 8) | 0x10;
54} 54}
55 55
56int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id, 56static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
57 u32 thresh, struct priv_cmd_entry **entry, 57 u32 id, u32 thresh, struct priv_cmd_entry **entry,
58 struct gk20a_fence **fence) 58 struct gk20a_fence **fence)
59{ 59{
60 struct gk20a_channel_syncpt *sp = 60 struct gk20a_channel_syncpt *sp =
@@ -84,7 +84,7 @@ int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
84 return 0; 84 return 0;
85} 85}
86 86
87int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd, 87static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
88 struct priv_cmd_entry **entry, 88 struct priv_cmd_entry **entry,
89 struct gk20a_fence **fence) 89 struct gk20a_fence **fence)
90{ 90{
@@ -221,7 +221,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
221 return 0; 221 return 0;
222} 222}
223 223
224int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s, 224static int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
225 struct priv_cmd_entry **entry, 225 struct priv_cmd_entry **entry,
226 struct gk20a_fence **fence) 226 struct gk20a_fence **fence)
227{ 227{
@@ -231,7 +231,7 @@ int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
231 entry, fence); 231 entry, fence);
232} 232}
233 233
234int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s, 234static int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
235 struct priv_cmd_entry **entry, 235 struct priv_cmd_entry **entry,
236 struct gk20a_fence **fence) 236 struct gk20a_fence **fence)
237{ 237{
@@ -243,7 +243,7 @@ int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
243 entry, fence); 243 entry, fence);
244} 244}
245 245
246int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s, 246static int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
247 int wait_fence_fd, 247 int wait_fence_fd,
248 struct priv_cmd_entry **entry, 248 struct priv_cmd_entry **entry,
249 struct gk20a_fence **fence, 249 struct gk20a_fence **fence,
@@ -257,7 +257,7 @@ int gk20a_channel_syncpt_incr_user(struct gk20a_channel_sync *s,
257 entry, fence); 257 entry, fence);
258} 258}
259 259
260void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s) 260static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
261{ 261{
262 struct gk20a_channel_syncpt *sp = 262 struct gk20a_channel_syncpt *sp =
263 container_of(s, struct gk20a_channel_syncpt, ops); 263 container_of(s, struct gk20a_channel_syncpt, ops);
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 39941aae..bd24e269 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -58,7 +58,8 @@ static int alloc_session(struct dbg_session_gk20a **_dbg_s)
58 return 0; 58 return 0;
59} 59}
60 60
61int gk20a_dbg_gpu_do_dev_open(struct inode *inode, struct file *filp, bool is_profiler) 61static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
62 struct file *filp, bool is_profiler)
62{ 63{
63 struct dbg_session_gk20a *dbg_session; 64 struct dbg_session_gk20a *dbg_session;
64 struct gk20a *g; 65 struct gk20a *g;
@@ -504,7 +505,8 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
504 505
505 gk20a_dbg_fn("Copying regops from userspace"); 506 gk20a_dbg_fn("Copying regops from userspace");
506 507
507 if (copy_from_user(ops, (void *)(uintptr_t)args->ops, ops_size)) { 508 if (copy_from_user(ops, (void __user *)(uintptr_t)args->ops,
509 ops_size)) {
508 dev_err(dev, "copy_from_user failed!"); 510 dev_err(dev, "copy_from_user failed!");
509 err = -EFAULT; 511 err = -EFAULT;
510 goto clean_up; 512 goto clean_up;
@@ -542,7 +544,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
542 544
543 gk20a_dbg_fn("Copying result to userspace"); 545 gk20a_dbg_fn("Copying result to userspace");
544 546
545 if (copy_to_user((void *)(uintptr_t)args->ops, ops, ops_size)) { 547 if (copy_to_user((void __user *)(uintptr_t)args->ops, ops, ops_size)) {
546 dev_err(dev, "copy_to_user failed!"); 548 dev_err(dev, "copy_to_user failed!");
547 err = -EFAULT; 549 err = -EFAULT;
548 goto clean_up; 550 goto clean_up;
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 2cc3d38d..676b493b 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -32,7 +32,7 @@
32#include "hw_pbdma_gk20a.h" 32#include "hw_pbdma_gk20a.h"
33 33
34unsigned int gk20a_debug_trace_cmdbuf; 34unsigned int gk20a_debug_trace_cmdbuf;
35struct platform_device *gk20a_device; 35static struct platform_device *gk20a_device;
36 36
37struct gk20a_debug_output { 37struct gk20a_debug_output {
38 void (*fn)(void *ctx, const char *str, size_t len); 38 void (*fn)(void *ctx, const char *str, size_t len);
@@ -89,7 +89,8 @@ static inline void gk20a_debug_write_to_seqfile(void *ctx, const char *str,
89 seq_write((struct seq_file *)ctx, str, len); 89 seq_write((struct seq_file *)ctx, str, len);
90} 90}
91 91
92void gk20a_debug_output(struct gk20a_debug_output *o, const char *fmt, ...) 92static void gk20a_debug_output(struct gk20a_debug_output *o,
93 const char *fmt, ...)
93{ 94{
94 va_list args; 95 va_list args;
95 int len; 96 int len;
@@ -159,7 +160,7 @@ static void gk20a_debug_show_channel(struct gk20a *g,
159 gk20a_debug_output(o, "\n"); 160 gk20a_debug_output(o, "\n");
160} 161}
161 162
162void gk20a_debug_show_dump(struct platform_device *pdev, 163static void gk20a_debug_show_dump(struct platform_device *pdev,
163 struct gk20a_debug_output *o) 164 struct gk20a_debug_output *o)
164{ 165{
165 struct gk20a_platform *platform = gk20a_get_platform(pdev); 166 struct gk20a_platform *platform = gk20a_get_platform(pdev);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 6e05d645..4095b079 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -152,7 +152,7 @@ static int init_engine_info(struct fifo_gk20a *f)
152 return 0; 152 return 0;
153} 153}
154 154
155void gk20a_remove_fifo_support(struct fifo_gk20a *f) 155static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
156{ 156{
157 struct gk20a *g = f->g; 157 struct gk20a *g = f->g;
158 struct device *d = dev_from_gk20a(g); 158 struct device *d = dev_from_gk20a(g);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index ad1a940d..276e5c3e 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -150,7 +150,6 @@ static const struct file_operations gk20a_prof_ops = {
150 .unlocked_ioctl = gk20a_dbg_gpu_dev_ioctl, 150 .unlocked_ioctl = gk20a_dbg_gpu_dev_ioctl,
151 /* .mmap = gk20a_prof_gpu_dev_mmap,*/ 151 /* .mmap = gk20a_prof_gpu_dev_mmap,*/
152 /*int (*mmap) (struct file *, struct vm_area_struct *);*/ 152 /*int (*mmap) (struct file *, struct vm_area_struct *);*/
153 .compat_ioctl = gk20a_dbg_gpu_dev_ioctl,
154#ifdef CONFIG_COMPAT 153#ifdef CONFIG_COMPAT
155 .compat_ioctl = gk20a_dbg_gpu_dev_ioctl, 154 .compat_ioctl = gk20a_dbg_gpu_dev_ioctl,
156#endif 155#endif
@@ -206,11 +205,11 @@ static void kunmap_and_free_iopage(void **kvaddr, struct page **page)
206{ 205{
207 if (*kvaddr) { 206 if (*kvaddr) {
208 kunmap(*kvaddr); 207 kunmap(*kvaddr);
209 *kvaddr = 0; 208 *kvaddr = NULL;
210 } 209 }
211 if (*page) { 210 if (*page) {
212 __free_page(*page); 211 __free_page(*page);
213 *page = 0; 212 *page = NULL;
214 } 213 }
215} 214}
216 215
@@ -606,11 +605,11 @@ static void gk20a_remove_support(struct platform_device *dev)
606 605
607 if (g->regs) { 606 if (g->regs) {
608 iounmap(g->regs); 607 iounmap(g->regs);
609 g->regs = 0; 608 g->regs = NULL;
610 } 609 }
611 if (g->bar1) { 610 if (g->bar1) {
612 iounmap(g->bar1); 611 iounmap(g->bar1);
613 g->bar1 = 0; 612 g->bar1 = NULL;
614 } 613 }
615} 614}
616 615
@@ -1063,11 +1062,11 @@ struct channel_gk20a *gk20a_get_channel_from_file(int fd)
1063 struct channel_gk20a *ch; 1062 struct channel_gk20a *ch;
1064 struct file *f = fget(fd); 1063 struct file *f = fget(fd);
1065 if (!f) 1064 if (!f)
1066 return 0; 1065 return NULL;
1067 1066
1068 if (f->f_op != &gk20a_channel_ops) { 1067 if (f->f_op != &gk20a_channel_ops) {
1069 fput(f); 1068 fput(f);
1070 return 0; 1069 return NULL;
1071 } 1070 }
1072 1071
1073 ch = (struct channel_gk20a *)f->private_data; 1072 ch = (struct channel_gk20a *)f->private_data;
@@ -1119,7 +1118,7 @@ static void gk20a_pm_shutdown(struct platform_device *pdev)
1119} 1118}
1120 1119
1121#ifdef CONFIG_PM 1120#ifdef CONFIG_PM
1122const struct dev_pm_ops gk20a_pm_ops = { 1121static const struct dev_pm_ops gk20a_pm_ops = {
1123#if defined(CONFIG_PM_RUNTIME) && !defined(CONFIG_PM_GENERIC_DOMAINS) 1122#if defined(CONFIG_PM_RUNTIME) && !defined(CONFIG_PM_GENERIC_DOMAINS)
1124 .runtime_resume = gk20a_pm_enable_clk, 1123 .runtime_resume = gk20a_pm_enable_clk,
1125 .runtime_suspend = gk20a_pm_disable_clk, 1124 .runtime_suspend = gk20a_pm_disable_clk,
@@ -1261,7 +1260,7 @@ static int gk20a_pm_init(struct platform_device *dev)
1261 return err; 1260 return err;
1262} 1261}
1263 1262
1264int gk20a_secure_page_alloc(struct platform_device *pdev) 1263static int gk20a_secure_page_alloc(struct platform_device *pdev)
1265{ 1264{
1266 struct gk20a_platform *platform = platform_get_drvdata(pdev); 1265 struct gk20a_platform *platform = platform_get_drvdata(pdev);
1267 int err = 0; 1266 int err = 0;
@@ -1466,13 +1465,15 @@ static int __exit gk20a_remove(struct platform_device *dev)
1466 1465
1467 gk20a_user_deinit(dev); 1466 gk20a_user_deinit(dev);
1468 1467
1469 set_gk20a(dev, 0); 1468 set_gk20a(dev, NULL);
1470#ifdef CONFIG_DEBUG_FS 1469#ifdef CONFIG_DEBUG_FS
1471 debugfs_remove(g->debugfs_ltc_enabled); 1470 debugfs_remove(g->debugfs_ltc_enabled);
1472 debugfs_remove(g->debugfs_gr_idle_timeout_default); 1471 debugfs_remove(g->debugfs_gr_idle_timeout_default);
1473 debugfs_remove(g->debugfs_timeouts_enabled); 1472 debugfs_remove(g->debugfs_timeouts_enabled);
1474#endif 1473#endif
1475 1474
1475 gk20a_remove_sysfs(&dev->dev);
1476
1476 kfree(g); 1477 kfree(g);
1477 1478
1478#ifdef CONFIG_PM_RUNTIME 1479#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 825cb886..e3392ae8 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -715,6 +715,7 @@ static inline int support_gk20a_pmu(struct platform_device *dev)
715} 715}
716 716
717void gk20a_create_sysfs(struct platform_device *dev); 717void gk20a_create_sysfs(struct platform_device *dev);
718void gk20a_remove_sysfs(struct device *dev);
718 719
719#define GK20A_BAR0_IORESOURCE_MEM 0 720#define GK20A_BAR0_IORESOURCE_MEM 0
720#define GK20A_BAR1_IORESOURCE_MEM 1 721#define GK20A_BAR1_IORESOURCE_MEM 1
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c b/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
index 0e3b0cb3..c7a34076 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
@@ -29,7 +29,7 @@ struct gating_desc {
29 u32 disable; 29 u32 disable;
30}; 30};
31/* slcg gr */ 31/* slcg gr */
32const struct gating_desc gk20a_slcg_gr[] = { 32static const struct gating_desc gk20a_slcg_gr[] = {
33 {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe}, 33 {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe},
34 {.addr = 0x00409894, .prod = 0x00000040, .disable = 0x0003fffe}, 34 {.addr = 0x00409894, .prod = 0x00000040, .disable = 0x0003fffe},
35 {.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe}, 35 {.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe},
@@ -97,7 +97,7 @@ const struct gating_desc gk20a_slcg_gr[] = {
97}; 97};
98 98
99/* slcg perf */ 99/* slcg perf */
100const struct gating_desc gk20a_slcg_perf[] = { 100static const struct gating_desc gk20a_slcg_perf[] = {
101 {.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000}, 101 {.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000},
102 {.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000}, 102 {.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000},
103 {.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000}, 103 {.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000},
@@ -105,7 +105,7 @@ const struct gating_desc gk20a_slcg_perf[] = {
105}; 105};
106 106
107/* blcg gr */ 107/* blcg gr */
108const struct gating_desc gk20a_blcg_gr[] = { 108static const struct gating_desc gk20a_blcg_gr[] = {
109 {.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000}, 109 {.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000},
110 {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000}, 110 {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
111 {.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000}, 111 {.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
@@ -185,7 +185,7 @@ const struct gating_desc gk20a_blcg_gr[] = {
185}; 185};
186 186
187/* pg gr */ 187/* pg gr */
188const struct gating_desc gk20a_pg_gr[] = { 188static const struct gating_desc gk20a_pg_gr[] = {
189 {.addr = 0x004041f8, .prod = 0x10940000, .disable = 0x00000000}, 189 {.addr = 0x004041f8, .prod = 0x10940000, .disable = 0x00000000},
190 {.addr = 0x004041fc, .prod = 0xff00a725, .disable = 0x00000000}, 190 {.addr = 0x004041fc, .prod = 0xff00a725, .disable = 0x00000000},
191 {.addr = 0x00409898, .prod = 0x10140000, .disable = 0x00000000}, 191 {.addr = 0x00409898, .prod = 0x10140000, .disable = 0x00000000},
@@ -291,7 +291,7 @@ const struct gating_desc gk20a_pg_gr[] = {
291}; 291};
292 292
293/* therm gr */ 293/* therm gr */
294const struct gating_desc gk20a_slcg_therm[] = { 294static const struct gating_desc gk20a_slcg_therm[] = {
295 {.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f}, 295 {.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f},
296}; 296};
297 297
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index eaccdbc7..cd9a9fca 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -96,7 +96,7 @@ static int gr_gk20a_get_netlist_name(int index, char *name)
96 return -1; 96 return -1;
97} 97}
98 98
99bool gr_gk20a_is_firmware_defined(void) 99static bool gr_gk20a_is_firmware_defined(void)
100{ 100{
101#ifdef GK20A_NETLIST_IMAGE_FW_NAME 101#ifdef GK20A_NETLIST_IMAGE_FW_NAME
102 return true; 102 return true;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index f87608d1..a6432f41 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -491,7 +491,7 @@ struct fecs_method_op_gk20a {
491 491
492}; 492};
493 493
494int gr_gk20a_submit_fecs_method_op(struct gk20a *g, 494static int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
495 struct fecs_method_op_gk20a op) 495 struct fecs_method_op_gk20a op)
496{ 496{
497 struct gr_gk20a *gr = &g->gr; 497 struct gr_gk20a *gr = &g->gr;
@@ -524,7 +524,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
524 return ret; 524 return ret;
525} 525}
526 526
527int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret) 527static int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
528{ 528{
529 return gr_gk20a_submit_fecs_method_op(g, 529 return gr_gk20a_submit_fecs_method_op(g,
530 (struct fecs_method_op_gk20a) { 530 (struct fecs_method_op_gk20a) {
@@ -544,14 +544,16 @@ int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
544int gr_gk20a_disable_ctxsw(struct gk20a *g) 544int gr_gk20a_disable_ctxsw(struct gk20a *g)
545{ 545{
546 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 546 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
547 return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_stop_ctxsw_v(), 0); 547 return gr_gk20a_ctrl_ctxsw(g,
548 gr_fecs_method_push_adr_stop_ctxsw_v(), NULL);
548} 549}
549 550
550/* Start processing (continue) context switches at FECS */ 551/* Start processing (continue) context switches at FECS */
551int gr_gk20a_enable_ctxsw(struct gk20a *g) 552int gr_gk20a_enable_ctxsw(struct gk20a *g)
552{ 553{
553 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 554 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
554 return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_start_ctxsw_v(), 0); 555 return gr_gk20a_ctrl_ctxsw(g,
556 gr_fecs_method_push_adr_start_ctxsw_v(), NULL);
555} 557}
556 558
557 559
@@ -2105,7 +2107,7 @@ void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base,
2105 gr_fecs_bootvec_vec_f(segments->boot_entry)); 2107 gr_fecs_bootvec_vec_f(segments->boot_entry));
2106} 2108}
2107 2109
2108int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base, 2110static int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
2109 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset) 2111 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
2110{ 2112{
2111 gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(), 2113 gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
@@ -2176,7 +2178,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2176 2178
2177 gk20a_dbg_fn(""); 2179 gk20a_dbg_fn("");
2178 2180
2179 ret = gr_gk20a_ctx_wait_ucode(g, 0, 0, 2181 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL,
2180 GR_IS_UCODE_OP_EQUAL, 2182 GR_IS_UCODE_OP_EQUAL,
2181 eUcodeHandshakeInitComplete, 2183 eUcodeHandshakeInitComplete,
2182 GR_IS_UCODE_OP_SKIP, 0); 2184 GR_IS_UCODE_OP_SKIP, 0);
@@ -3794,7 +3796,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
3794 return 0; 3796 return 0;
3795} 3797}
3796 3798
3797int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) 3799static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
3798{ 3800{
3799 int i, ret; 3801 int i, ret;
3800 3802
@@ -4453,7 +4455,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4453 return -ETIMEDOUT; 4455 return -ETIMEDOUT;
4454} 4456}
4455 4457
4456int gr_gk20a_init_ctxsw(struct gk20a *g) 4458static int gr_gk20a_init_ctxsw(struct gk20a *g)
4457{ 4459{
4458 struct gr_gk20a *gr = &g->gr; 4460 struct gr_gk20a *gr = &g->gr;
4459 u32 err = 0; 4461 u32 err = 0;
@@ -4481,7 +4483,7 @@ out:
4481 return 0; 4483 return 0;
4482} 4484}
4483 4485
4484int gk20a_init_gr_reset_enable_hw(struct gk20a *g) 4486static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
4485{ 4487{
4486 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; 4488 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
4487 unsigned long end_jiffies = jiffies + 4489 unsigned long end_jiffies = jiffies +
@@ -5859,7 +5861,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
5859 u32 *priv_offset); 5861 u32 *priv_offset);
5860 5862
5861/* This function will decode a priv address and return the partition type and numbers. */ 5863/* This function will decode a priv address and return the partition type and numbers. */
5862int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, 5864static int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
5863 int *addr_type, /* enum ctxsw_addr_type */ 5865 int *addr_type, /* enum ctxsw_addr_type */
5864 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num, 5866 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num,
5865 u32 *broadcast_flags) 5867 u32 *broadcast_flags)
@@ -6154,7 +6156,7 @@ static void init_sm_dsm_reg_info(void)
6154 * which makes it impossible to know externally whether a ctx 6156 * which makes it impossible to know externally whether a ctx
6155 * write will actually occur. so later we should put a lazy, 6157 * write will actually occur. so later we should put a lazy,
6156 * map-and-hold system in the patch write state */ 6158 * map-and-hold system in the patch write state */
6157int gr_gk20a_ctx_patch_smpc(struct gk20a *g, 6159static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6158 struct channel_ctx_gk20a *ch_ctx, 6160 struct channel_ctx_gk20a *ch_ctx,
6159 u32 addr, u32 data, 6161 u32 addr, u32 data,
6160 u8 *context) 6162 u8 *context)
diff --git a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
index 208c1ef0..7000f682 100644
--- a/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/hal_gk20a.c
@@ -28,7 +28,7 @@
28#include "clk_gk20a.h" 28#include "clk_gk20a.h"
29#include "regops_gk20a.h" 29#include "regops_gk20a.h"
30 30
31struct gpu_ops gk20a_ops = { 31static struct gpu_ops gk20a_ops = {
32 .clock_gating = { 32 .clock_gating = {
33 .slcg_gr_load_gating_prod = 33 .slcg_gr_load_gating_prod =
34 gr_gk20a_slcg_gr_load_gating_prod, 34 gr_gk20a_slcg_gr_load_gating_prod,
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index aa094dc7..71d87b5c 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -181,7 +181,7 @@ static void gk20a_ltc_init_fs_state(struct gk20a *g)
181 g->max_ltc_count = g->ltc_count = 1; 181 g->max_ltc_count = g->ltc_count = 1;
182} 182}
183 183
184void gk20a_ltc_isr(struct gk20a *g) 184static void gk20a_ltc_isr(struct gk20a *g)
185{ 185{
186 u32 intr; 186 u32 intr;
187 187
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index c121d6bf..7043a81d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -263,7 +263,7 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
263 return 0; 263 return 0;
264} 264}
265 265
266void gk20a_remove_mm_support(struct mm_gk20a *mm) 266static void gk20a_remove_mm_support(struct mm_gk20a *mm)
267{ 267{
268 struct gk20a *g = mm->g; 268 struct gk20a *g = mm->g;
269 struct device *d = dev_from_gk20a(g); 269 struct device *d = dev_from_gk20a(g);
@@ -405,7 +405,7 @@ err_out:
405 return -ENOMEM; 405 return -ENOMEM;
406} 406}
407 407
408void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle, 408static void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
409 struct sg_table *sgt, u32 order, 409 struct sg_table *sgt, u32 order,
410 size_t size) 410 size_t size)
411{ 411{
@@ -415,7 +415,7 @@ void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle,
415 kfree(sgt); 415 kfree(sgt);
416} 416}
417 417
418int map_gmmu_phys_pages(void *handle, struct sg_table *sgt, 418static int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
419 void **va, size_t size) 419 void **va, size_t size)
420{ 420{
421 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); 421 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
@@ -423,7 +423,7 @@ int map_gmmu_phys_pages(void *handle, struct sg_table *sgt,
423 return 0; 423 return 0;
424} 424}
425 425
426void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va) 426static void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va)
427{ 427{
428 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); 428 FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length);
429} 429}
@@ -913,7 +913,7 @@ static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
913 return mapped_buffer; 913 return mapped_buffer;
914 node = rb_next(&mapped_buffer->node); 914 node = rb_next(&mapped_buffer->node);
915 } 915 }
916 return 0; 916 return NULL;
917} 917}
918 918
919static struct mapped_buffer_node *find_mapped_buffer_locked( 919static struct mapped_buffer_node *find_mapped_buffer_locked(
@@ -931,7 +931,7 @@ static struct mapped_buffer_node *find_mapped_buffer_locked(
931 else 931 else
932 return mapped_buffer; 932 return mapped_buffer;
933 } 933 }
934 return 0; 934 return NULL;
935} 935}
936 936
937static struct mapped_buffer_node *find_mapped_buffer_range_locked( 937static struct mapped_buffer_node *find_mapped_buffer_range_locked(
@@ -948,7 +948,7 @@ static struct mapped_buffer_node *find_mapped_buffer_range_locked(
948 else 948 else
949 node = node->rb_right; 949 node = node->rb_right;
950 } 950 }
951 return 0; 951 return NULL;
952} 952}
953 953
954#define BFR_ATTRS (sizeof(nvmap_bfr_param)/sizeof(nvmap_bfr_param[0])) 954#define BFR_ATTRS (sizeof(nvmap_bfr_param)/sizeof(nvmap_bfr_param[0]))
@@ -1177,7 +1177,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
1177 /* unmap here needs to know the page size we assigned at mapping */ 1177 /* unmap here needs to know the page size we assigned at mapping */
1178 err = update_gmmu_ptes_locked(vm, 1178 err = update_gmmu_ptes_locked(vm,
1179 pgsz_idx, 1179 pgsz_idx,
1180 0, /* n/a for unmap */ 1180 NULL, /* n/a for unmap */
1181 0, 1181 0,
1182 vaddr, 1182 vaddr,
1183 vaddr + size - 1, 1183 vaddr + size - 1,
@@ -1209,7 +1209,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
1209 bool user_mapped, 1209 bool user_mapped,
1210 int rw_flag) 1210 int rw_flag)
1211{ 1211{
1212 struct mapped_buffer_node *mapped_buffer = 0; 1212 struct mapped_buffer_node *mapped_buffer = NULL;
1213 1213
1214 mapped_buffer = 1214 mapped_buffer =
1215 find_mapped_buffer_reverse_locked(&vm->mapped_buffers, 1215 find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
@@ -1278,7 +1278,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1278 struct gk20a *g = gk20a_from_vm(vm); 1278 struct gk20a *g = gk20a_from_vm(vm);
1279 struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags; 1279 struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags;
1280 struct device *d = dev_from_vm(vm); 1280 struct device *d = dev_from_vm(vm);
1281 struct mapped_buffer_node *mapped_buffer = 0; 1281 struct mapped_buffer_node *mapped_buffer = NULL;
1282 bool inserted = false, va_allocated = false; 1282 bool inserted = false, va_allocated = false;
1283 u32 gmmu_page_size = 0; 1283 u32 gmmu_page_size = 0;
1284 u64 map_offset = 0; 1284 u64 map_offset = 0;
@@ -1991,7 +1991,7 @@ static int gk20a_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr,
1991 return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx); 1991 return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx);
1992} 1992}
1993 1993
1994void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr, 1994static void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
1995 u64 size, u32 pgsz_idx) { 1995 u64 size, u32 pgsz_idx) {
1996 struct gk20a *g = vm->mm->g; 1996 struct gk20a *g = vm->mm->g;
1997 1997
@@ -2536,7 +2536,7 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
2536 ch->vm = vm; 2536 ch->vm = vm;
2537 err = channel_gk20a_commit_va(ch); 2537 err = channel_gk20a_commit_va(ch);
2538 if (err) 2538 if (err)
2539 ch->vm = 0; 2539 ch->vm = NULL;
2540 2540
2541 return err; 2541 return err;
2542} 2542}
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index 185d661e..b75420e6 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -37,7 +37,7 @@
37#define TEGRA_DDR4_BW_PER_FREQ 16 37#define TEGRA_DDR4_BW_PER_FREQ 16
38 38
39extern struct device tegra_vpr_dev; 39extern struct device tegra_vpr_dev;
40struct gk20a_platform t132_gk20a_tegra_platform; 40static struct gk20a_platform t132_gk20a_tegra_platform;
41 41
42struct gk20a_emc_params { 42struct gk20a_emc_params {
43 long bw_ratio; 43 long bw_ratio;
@@ -142,7 +142,7 @@ fail:
142 * This function returns the minimum emc clock based on gpu frequency 142 * This function returns the minimum emc clock based on gpu frequency
143 */ 143 */
144 144
145long gk20a_tegra_get_emc_rate(struct gk20a *g, 145static long gk20a_tegra_get_emc_rate(struct gk20a *g,
146 struct gk20a_emc_params *emc_params, long freq) 146 struct gk20a_emc_params *emc_params, long freq)
147{ 147{
148 long hz; 148 long hz;
@@ -197,7 +197,7 @@ static void gk20a_tegra_prescale(struct platform_device *pdev)
197 * 197 *
198 */ 198 */
199 199
200void gk20a_tegra_calibrate_emc(struct platform_device *pdev, 200static void gk20a_tegra_calibrate_emc(struct platform_device *pdev,
201 struct gk20a_emc_params *emc_params) 201 struct gk20a_emc_params *emc_params)
202{ 202{
203 enum tegra_chipid cid = tegra_get_chipid(); 203 enum tegra_chipid cid = tegra_get_chipid();
@@ -271,7 +271,7 @@ static int gk20a_tegra_unrailgate(struct platform_device *pdev)
271 return ret; 271 return ret;
272} 272}
273 273
274struct { 274static struct {
275 char *name; 275 char *name;
276 unsigned long default_rate; 276 unsigned long default_rate;
277} tegra_gk20a_clocks[] = { 277} tegra_gk20a_clocks[] = {
@@ -472,7 +472,7 @@ static int gk20a_tegra_suspend(struct device *dev)
472 return 0; 472 return 0;
473} 473}
474 474
475struct gk20a_platform t132_gk20a_tegra_platform = { 475static struct gk20a_platform t132_gk20a_tegra_platform = {
476 .has_syncpoints = true, 476 .has_syncpoints = true,
477 477
478 /* power management configuration */ 478 /* power management configuration */
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 3fa7e53c..47ee7a1b 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -201,7 +201,7 @@ static void set_pmu_cmdline_args_falctracesize_v1(
201 pmu->args_v1.falc_trace_size = size; 201 pmu->args_v1.falc_trace_size = size;
202} 202}
203 203
204int find_hex_in_string(char *strings, struct gk20a *g) 204static int find_hex_in_string(char *strings, struct gk20a *g)
205{ 205{
206 u32 i = 0, j = strlen(strings); 206 u32 i = 0, j = strlen(strings);
207 for (; i < j; i++) { 207 for (; i < j; i++) {
@@ -212,7 +212,7 @@ int find_hex_in_string(char *strings, struct gk20a *g)
212 return 0xFF; 212 return 0xFF;
213} 213}
214 214
215void printtrace(struct pmu_gk20a *pmu) 215static void printtrace(struct pmu_gk20a *pmu)
216{ 216{
217 u32 i = 0, j = 0, k, l, m, count; 217 u32 i = 0, j = 0, k, l, m, count;
218 char *trace = pmu->trace_buf.cpuva; 218 char *trace = pmu->trace_buf.cpuva;
@@ -1926,7 +1926,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
1926 gk20a_allocator_destroy(&pmu->dmem); 1926 gk20a_allocator_destroy(&pmu->dmem);
1927} 1927}
1928 1928
1929int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) 1929static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
1930{ 1930{
1931 struct pmu_gk20a *pmu = &g->pmu; 1931 struct pmu_gk20a *pmu = &g->pmu;
1932 1932
@@ -2022,7 +2022,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
2022 return err; 2022 return err;
2023} 2023}
2024 2024
2025int gk20a_init_pmu_setup_sw(struct gk20a *g) 2025static int gk20a_init_pmu_setup_sw(struct gk20a *g)
2026{ 2026{
2027 struct pmu_gk20a *pmu = &g->pmu; 2027 struct pmu_gk20a *pmu = &g->pmu;
2028 struct mm_gk20a *mm = &g->mm; 2028 struct mm_gk20a *mm = &g->mm;
@@ -2219,7 +2219,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
2219 } 2219 }
2220} 2220}
2221 2221
2222int gk20a_init_pmu_setup_hw1(struct gk20a *g) 2222static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
2223{ 2223{
2224 struct pmu_gk20a *pmu = &g->pmu; 2224 struct pmu_gk20a *pmu = &g->pmu;
2225 int err; 2225 int err;
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index d9984e03..fe444c78 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -577,7 +577,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
577 if (!dbg_s->ch) { 577 if (!dbg_s->ch) {
578 gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound"); 578 gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound");
579 op->status = REGOP(STATUS_UNSUPPORTED_OP); 579 op->status = REGOP(STATUS_UNSUPPORTED_OP);
580 return -ENODEV; 580 return valid;
581 } 581 }
582 582
583 /* binary search context list */ 583 /* binary search context list */
@@ -697,67 +697,68 @@ bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset)
697 return valid; 697 return valid;
698} 698}
699 699
700const struct regop_offset_range *gk20a_get_global_whitelist_ranges(void) 700static const struct regop_offset_range *gk20a_get_global_whitelist_ranges(void)
701{ 701{
702 return gk20a_global_whitelist_ranges; 702 return gk20a_global_whitelist_ranges;
703} 703}
704 704
705int gk20a_get_global_whitelist_ranges_count(void) 705static int gk20a_get_global_whitelist_ranges_count(void)
706{ 706{
707 return gk20a_global_whitelist_ranges_count; 707 return gk20a_global_whitelist_ranges_count;
708} 708}
709 709
710const struct regop_offset_range *gk20a_get_context_whitelist_ranges(void) 710static const struct regop_offset_range *gk20a_get_context_whitelist_ranges(void)
711{ 711{
712 return gk20a_context_whitelist_ranges; 712 return gk20a_context_whitelist_ranges;
713} 713}
714 714
715int gk20a_get_context_whitelist_ranges_count(void) 715static int gk20a_get_context_whitelist_ranges_count(void)
716{ 716{
717 return gk20a_context_whitelist_ranges_count; 717 return gk20a_context_whitelist_ranges_count;
718} 718}
719 719
720const u32 *gk20a_get_runcontrol_whitelist(void) 720static const u32 *gk20a_get_runcontrol_whitelist(void)
721{ 721{
722 return gk20a_runcontrol_whitelist; 722 return gk20a_runcontrol_whitelist;
723} 723}
724 724
725int gk20a_get_runcontrol_whitelist_count(void) 725static int gk20a_get_runcontrol_whitelist_count(void)
726{ 726{
727 return gk20a_runcontrol_whitelist_count; 727 return gk20a_runcontrol_whitelist_count;
728} 728}
729 729
730const struct regop_offset_range *gk20a_get_runcontrol_whitelist_ranges(void) 730static const
731struct regop_offset_range *gk20a_get_runcontrol_whitelist_ranges(void)
731{ 732{
732 return gk20a_runcontrol_whitelist_ranges; 733 return gk20a_runcontrol_whitelist_ranges;
733} 734}
734 735
735int gk20a_get_runcontrol_whitelist_ranges_count(void) 736static int gk20a_get_runcontrol_whitelist_ranges_count(void)
736{ 737{
737 return gk20a_runcontrol_whitelist_ranges_count; 738 return gk20a_runcontrol_whitelist_ranges_count;
738} 739}
739 740
740const u32 *gk20a_get_qctl_whitelist(void) 741static const u32 *gk20a_get_qctl_whitelist(void)
741{ 742{
742 return gk20a_qctl_whitelist; 743 return gk20a_qctl_whitelist;
743} 744}
744 745
745int gk20a_get_qctl_whitelist_count(void) 746static int gk20a_get_qctl_whitelist_count(void)
746{ 747{
747 return gk20a_qctl_whitelist_count; 748 return gk20a_qctl_whitelist_count;
748} 749}
749 750
750const struct regop_offset_range *gk20a_get_qctl_whitelist_ranges(void) 751static const struct regop_offset_range *gk20a_get_qctl_whitelist_ranges(void)
751{ 752{
752 return gk20a_qctl_whitelist_ranges; 753 return gk20a_qctl_whitelist_ranges;
753} 754}
754 755
755int gk20a_get_qctl_whitelist_ranges_count(void) 756static int gk20a_get_qctl_whitelist_ranges_count(void)
756{ 757{
757 return gk20a_qctl_whitelist_ranges_count; 758 return gk20a_qctl_whitelist_ranges_count;
758} 759}
759 760
760int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s) 761static int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
761{ 762{
762 /* The following regops are a hack/war to make up for the fact that we 763 /* The following regops are a hack/war to make up for the fact that we
763 * just scribbled into the ctxsw image w/o really knowing whether 764 * just scribbled into the ctxsw image w/o really knowing whether
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 50b495a6..470a93bc 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -55,13 +55,13 @@ static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm);
55 55
56/*Globals*/ 56/*Globals*/
57static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE); 57static void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
58get_ucode_details pmu_acr_supp_ucode_list[] = { 58static get_ucode_details pmu_acr_supp_ucode_list[] = {
59 pmu_ucode_details, 59 pmu_ucode_details,
60 fecs_ucode_details, 60 fecs_ucode_details,
61}; 61};
62 62
63/*Once is LS mode, cpuctl_alias is only accessible*/ 63/*Once is LS mode, cpuctl_alias is only accessible*/
64void start_gm20b_pmu(struct gk20a *g) 64static void start_gm20b_pmu(struct gk20a *g)
65{ 65{
66 /*disable irqs for hs falcon booting as we will poll for halt*/ 66 /*disable irqs for hs falcon booting as we will poll for halt*/
67 mutex_lock(&g->pmu.isr_mutex); 67 mutex_lock(&g->pmu.isr_mutex);
@@ -272,7 +272,7 @@ int prepare_ucode_blob(struct gk20a *g)
272 return 0; 272 return 0;
273} 273}
274 274
275u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm, 275static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr *plsfm,
276 u32 falcon_id) 276 u32 falcon_id)
277{ 277{
278 return (plsfm->disable_mask >> falcon_id) & 0x1; 278 return (plsfm->disable_mask >> falcon_id) & 0x1;
@@ -364,7 +364,7 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
364} 364}
365 365
366 366
367int pmu_populate_loader_cfg(struct gk20a *g, 367static int pmu_populate_loader_cfg(struct gk20a *g,
368 struct lsfm_managed_ucode_img *lsfm, 368 struct lsfm_managed_ucode_img *lsfm,
369 union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size) 369 union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
370{ 370{
@@ -431,7 +431,7 @@ int pmu_populate_loader_cfg(struct gk20a *g,
431 return 0; 431 return 0;
432} 432}
433 433
434int flcn_populate_bl_dmem_desc(struct gk20a *g, 434static int flcn_populate_bl_dmem_desc(struct gk20a *g,
435 struct lsfm_managed_ucode_img *lsfm, 435 struct lsfm_managed_ucode_img *lsfm,
436 union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size) 436 union flcn_bl_generic_desc *p_bl_gen_desc, u32 *p_bl_gen_desc_size)
437{ 437{
@@ -1019,7 +1019,7 @@ err_release_acr_fw:
1019 return err; 1019 return err;
1020} 1020}
1021 1021
1022u8 pmu_is_debug_mode_en(struct gk20a *g) 1022static u8 pmu_is_debug_mode_en(struct gk20a *g)
1023{ 1023{
1024 u32 ctl_stat = gk20a_readl(g, pwr_pmu_scpctl_stat_r()); 1024 u32 ctl_stat = gk20a_readl(g, pwr_pmu_scpctl_stat_r());
1025 return pwr_pmu_scpctl_stat_debug_mode_v(ctl_stat); 1025 return pwr_pmu_scpctl_stat_debug_mode_v(ctl_stat);
@@ -1125,8 +1125,8 @@ static int bl_bootstrap(struct pmu_gk20a *pmu,
1125 return 0; 1125 return 0;
1126} 1126}
1127 1127
1128int gm20b_init_pmu_setup_hw1(struct gk20a *g, struct flcn_bl_dmem_desc *desc, 1128static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
1129 u32 bl_sz) 1129 struct flcn_bl_dmem_desc *desc, u32 bl_sz)
1130{ 1130{
1131 1131
1132 struct pmu_gk20a *pmu = &g->pmu; 1132 struct pmu_gk20a *pmu = &g->pmu;
diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
index a2aa81d8..293c6c74 100644
--- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
@@ -64,7 +64,7 @@ static bool gm20b_kind_zbc(u8 k)
64 k <= gmmu_pte_kind_s8_2s_v()); 64 k <= gmmu_pte_kind_s8_2s_v());
65} 65}
66 66
67void gm20b_init_kind_attr(void) 67static void gm20b_init_kind_attr(void)
68{ 68{
69 u16 k; 69 u16 k;
70 70
diff --git a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
index b5477c0f..6d94e299 100644
--- a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
@@ -30,17 +30,17 @@ struct gating_desc {
30 u32 disable; 30 u32 disable;
31}; 31};
32/* slcg bus */ 32/* slcg bus */
33const struct gating_desc gm20b_slcg_bus[] = { 33static const struct gating_desc gm20b_slcg_bus[] = {
34 {.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe}, 34 {.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe},
35}; 35};
36 36
37/* slcg ce2 */ 37/* slcg ce2 */
38const struct gating_desc gm20b_slcg_ce2[] = { 38static const struct gating_desc gm20b_slcg_ce2[] = {
39 {.addr = 0x00106f28, .prod = 0x00000000, .disable = 0x000007fe}, 39 {.addr = 0x00106f28, .prod = 0x00000000, .disable = 0x000007fe},
40}; 40};
41 41
42/* slcg chiplet */ 42/* slcg chiplet */
43const struct gating_desc gm20b_slcg_chiplet[] = { 43static const struct gating_desc gm20b_slcg_chiplet[] = {
44 {.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007}, 44 {.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007},
45 {.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007}, 45 {.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007},
46 {.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007}, 46 {.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007},
@@ -48,23 +48,23 @@ const struct gating_desc gm20b_slcg_chiplet[] = {
48}; 48};
49 49
50/* slcg ctxsw firmware */ 50/* slcg ctxsw firmware */
51const struct gating_desc gm20b_slcg_ctxsw_firmware[] = { 51static const struct gating_desc gm20b_slcg_ctxsw_firmware[] = {
52 {.addr = 0x00005f00, .prod = 0x00020008, .disable = 0x0003fffe}, 52 {.addr = 0x00005f00, .prod = 0x00020008, .disable = 0x0003fffe},
53}; 53};
54 54
55/* slcg fb */ 55/* slcg fb */
56const struct gating_desc gm20b_slcg_fb[] = { 56static const struct gating_desc gm20b_slcg_fb[] = {
57 {.addr = 0x00100d14, .prod = 0xfffffffe, .disable = 0xfffffffe}, 57 {.addr = 0x00100d14, .prod = 0xfffffffe, .disable = 0xfffffffe},
58 {.addr = 0x00100c9c, .prod = 0x000001fe, .disable = 0x000001fe}, 58 {.addr = 0x00100c9c, .prod = 0x000001fe, .disable = 0x000001fe},
59}; 59};
60 60
61/* slcg fifo */ 61/* slcg fifo */
62const struct gating_desc gm20b_slcg_fifo[] = { 62static const struct gating_desc gm20b_slcg_fifo[] = {
63 {.addr = 0x000026ac, .prod = 0x00000100, .disable = 0x0001fffe}, 63 {.addr = 0x000026ac, .prod = 0x00000100, .disable = 0x0001fffe},
64}; 64};
65 65
66/* slcg gr */ 66/* slcg gr */
67const struct gating_desc gm20b_slcg_gr[] = { 67static const struct gating_desc gm20b_slcg_gr[] = {
68 {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe}, 68 {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x03fffffe},
69 {.addr = 0x0040917c, .prod = 0x00020008, .disable = 0x0003fffe}, 69 {.addr = 0x0040917c, .prod = 0x00020008, .disable = 0x0003fffe},
70 {.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0003fffe}, 70 {.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0003fffe},
@@ -126,13 +126,13 @@ const struct gating_desc gm20b_slcg_gr[] = {
126}; 126};
127 127
128/* slcg ltc */ 128/* slcg ltc */
129const struct gating_desc gm20b_slcg_ltc[] = { 129static const struct gating_desc gm20b_slcg_ltc[] = {
130 {.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe}, 130 {.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe},
131 {.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe}, 131 {.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe},
132}; 132};
133 133
134/* slcg perf */ 134/* slcg perf */
135const struct gating_desc gm20b_slcg_perf[] = { 135static const struct gating_desc gm20b_slcg_perf[] = {
136 {.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000}, 136 {.addr = 0x001be018, .prod = 0x000001ff, .disable = 0x00000000},
137 {.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000}, 137 {.addr = 0x001bc018, .prod = 0x000001ff, .disable = 0x00000000},
138 {.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000}, 138 {.addr = 0x001b8018, .prod = 0x000001ff, .disable = 0x00000000},
@@ -140,12 +140,12 @@ const struct gating_desc gm20b_slcg_perf[] = {
140}; 140};
141 141
142/* slcg PriRing */ 142/* slcg PriRing */
143const struct gating_desc gm20b_slcg_priring[] = { 143static const struct gating_desc gm20b_slcg_priring[] = {
144 {.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001}, 144 {.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001},
145}; 145};
146 146
147/* slcg pwr_csb */ 147/* slcg pwr_csb */
148const struct gating_desc gm20b_slcg_pwr_csb[] = { 148static const struct gating_desc gm20b_slcg_pwr_csb[] = {
149 {.addr = 0x0000017c, .prod = 0x00020008, .disable = 0x0003fffe}, 149 {.addr = 0x0000017c, .prod = 0x00020008, .disable = 0x0003fffe},
150 {.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f}, 150 {.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f},
151 {.addr = 0x00000a74, .prod = 0x00000000, .disable = 0x00007ffe}, 151 {.addr = 0x00000a74, .prod = 0x00000000, .disable = 0x00007ffe},
@@ -153,35 +153,35 @@ const struct gating_desc gm20b_slcg_pwr_csb[] = {
153}; 153};
154 154
155/* slcg pmu */ 155/* slcg pmu */
156const struct gating_desc gm20b_slcg_pmu[] = { 156static const struct gating_desc gm20b_slcg_pmu[] = {
157 {.addr = 0x0010a17c, .prod = 0x00020008, .disable = 0x0003fffe}, 157 {.addr = 0x0010a17c, .prod = 0x00020008, .disable = 0x0003fffe},
158 {.addr = 0x0010aa74, .prod = 0x00000000, .disable = 0x00007ffe}, 158 {.addr = 0x0010aa74, .prod = 0x00000000, .disable = 0x00007ffe},
159 {.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f}, 159 {.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f},
160}; 160};
161 161
162/* therm gr */ 162/* therm gr */
163const struct gating_desc gm20b_slcg_therm[] = { 163static const struct gating_desc gm20b_slcg_therm[] = {
164 {.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f}, 164 {.addr = 0x000206b8, .prod = 0x00000000, .disable = 0x0000000f},
165}; 165};
166 166
167/* slcg Xbar */ 167/* slcg Xbar */
168const struct gating_desc gm20b_slcg_xbar[] = { 168static const struct gating_desc gm20b_slcg_xbar[] = {
169 {.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe}, 169 {.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe},
170 {.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe}, 170 {.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe},
171}; 171};
172 172
173/* blcg bus */ 173/* blcg bus */
174const struct gating_desc gm20b_blcg_bus[] = { 174static const struct gating_desc gm20b_blcg_bus[] = {
175 {.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000}, 175 {.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000},
176}; 176};
177 177
178/* blcg ctxsw firmware */ 178/* blcg ctxsw firmware */
179const struct gating_desc gm20b_blcg_ctxsw_firmware[] = { 179static const struct gating_desc gm20b_blcg_ctxsw_firmware[] = {
180 {.addr = 0x00022400, .prod = 0x00000000, .disable = 0x00000000}, 180 {.addr = 0x00022400, .prod = 0x00000000, .disable = 0x00000000},
181}; 181};
182 182
183/* blcg fb */ 183/* blcg fb */
184const struct gating_desc gm20b_blcg_fb[] = { 184static const struct gating_desc gm20b_blcg_fb[] = {
185 {.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000}, 185 {.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000},
186 {.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000}, 186 {.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000},
187 {.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000}, 187 {.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000},
@@ -191,12 +191,12 @@ const struct gating_desc gm20b_blcg_fb[] = {
191}; 191};
192 192
193/* blcg fifo */ 193/* blcg fifo */
194const struct gating_desc gm20b_blcg_fifo[] = { 194static const struct gating_desc gm20b_blcg_fifo[] = {
195 {.addr = 0x000026a4, .prod = 0x0000c242, .disable = 0x00000000}, 195 {.addr = 0x000026a4, .prod = 0x0000c242, .disable = 0x00000000},
196}; 196};
197 197
198/* blcg gr */ 198/* blcg gr */
199const struct gating_desc gm20b_blcg_gr[] = { 199static const struct gating_desc gm20b_blcg_gr[] = {
200 {.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000}, 200 {.addr = 0x004041f0, .prod = 0x00004046, .disable = 0x00000000},
201 {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000}, 201 {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
202 {.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000}, 202 {.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
@@ -261,7 +261,7 @@ const struct gating_desc gm20b_blcg_gr[] = {
261}; 261};
262 262
263/* blcg ltc */ 263/* blcg ltc */
264const struct gating_desc gm20b_blcg_ltc[] = { 264static const struct gating_desc gm20b_blcg_ltc[] = {
265 {.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000}, 265 {.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000},
266 {.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000}, 266 {.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000},
267 {.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000}, 267 {.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000},
@@ -269,23 +269,23 @@ const struct gating_desc gm20b_blcg_ltc[] = {
269}; 269};
270 270
271/* blcg pwr_csb */ 271/* blcg pwr_csb */
272const struct gating_desc gm20b_blcg_pwr_csb[] = { 272static const struct gating_desc gm20b_blcg_pwr_csb[] = {
273 {.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000}, 273 {.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000},
274}; 274};
275 275
276/* blcg pmu */ 276/* blcg pmu */
277const struct gating_desc gm20b_blcg_pmu[] = { 277static const struct gating_desc gm20b_blcg_pmu[] = {
278 {.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000}, 278 {.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000},
279}; 279};
280 280
281/* blcg Xbar */ 281/* blcg Xbar */
282const struct gating_desc gm20b_blcg_xbar[] = { 282static const struct gating_desc gm20b_blcg_xbar[] = {
283 {.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000}, 283 {.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000},
284 {.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000}, 284 {.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000},
285}; 285};
286 286
287/* pg gr */ 287/* pg gr */
288const struct gating_desc gm20b_pg_gr[] = { 288static const struct gating_desc gm20b_pg_gr[] = {
289}; 289};
290 290
291/* static inline functions */ 291/* static inline functions */
@@ -440,21 +440,6 @@ void gm20b_slcg_priring_load_gating_prod(struct gk20a *g,
440 } 440 }
441} 441}
442 442
443void gm20b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
444 bool prod)
445{
446 u32 i;
447 u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc);
448 for (i = 0; i < size; i++) {
449 if (prod)
450 gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
451 gm20b_slcg_pwr_csb[i].prod);
452 else
453 gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
454 gm20b_slcg_pwr_csb[i].disable);
455 }
456}
457
458void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g, 443void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g,
459 bool prod) 444 bool prod)
460{ 445{
@@ -620,21 +605,6 @@ void gm20b_blcg_pmu_load_gating_prod(struct gk20a *g,
620 } 605 }
621} 606}
622 607
623void gm20b_blcg_xbar_load_gating_prod(struct gk20a *g,
624 bool prod)
625{
626 u32 i;
627 u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc);
628 for (i = 0; i < size; i++) {
629 if (prod)
630 gk20a_writel(g, gm20b_blcg_xbar[i].addr,
631 gm20b_blcg_xbar[i].prod);
632 else
633 gk20a_writel(g, gm20b_blcg_xbar[i].addr,
634 gm20b_blcg_xbar[i].disable);
635 }
636}
637
638void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g, 608void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g,
639 bool prod) 609 bool prod)
640{ 610{
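
The two helpers deleted above were dead code, but their bodies show the loader pattern every surviving *_load_gating_prod() function follows: walk a const gating_desc table and write either the .prod or the .disable value to each register. The standalone sketch below reproduces that pattern outside the driver; struct gk20a, gk20a_writel() and the demo_* names are stubs for illustration only, not the real driver definitions.

/* Minimal, self-contained sketch of the load_gating_prod pattern used by
 * the gm20b gating reglists. struct gk20a and gk20a_writel() are stubbed
 * here purely for illustration; the real driver provides them. */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

struct gk20a { int dummy; };

struct gating_desc {
	uint32_t addr;
	uint32_t prod;
	uint32_t disable;
};

/* Stand-in for the real register write helper. */
static void gk20a_writel(struct gk20a *g, uint32_t addr, uint32_t val)
{
	(void)g;
	printf("write 0x%08x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
}

/* File-local table, mirroring e.g. gm20b_blcg_fifo[]. */
static const struct gating_desc demo_blcg_fifo[] = {
	{ .addr = 0x000026a4, .prod = 0x0000c242, .disable = 0x00000000 },
};

/* Generic loader: write the production value when enabling clock gating,
 * the disable value otherwise. Sizing the loop from the table itself
 * avoids hard-coding the entry count. */
static void demo_blcg_fifo_load_gating_prod(struct gk20a *g, bool prod)
{
	size_t i;
	size_t size = sizeof(demo_blcg_fifo) / sizeof(demo_blcg_fifo[0]);

	for (i = 0; i < size; i++)
		gk20a_writel(g, demo_blcg_fifo[i].addr,
			     prod ? demo_blcg_fifo[i].prod
				  : demo_blcg_fifo[i].disable);
}

int main(void)
{
	struct gk20a g = { 0 };

	demo_blcg_fifo_load_gating_prod(&g, true);
	return 0;
}
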
diff --git a/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
index 0309e110..2a654760 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_ctx_gm20b.c
@@ -57,7 +57,7 @@ static int gr_gm20b_get_netlist_name(int index, char *name)
57 return -1; 57 return -1;
58} 58}
59 59
60bool gr_gm20b_is_firmware_defined(void) 60static bool gr_gm20b_is_firmware_defined(void)
61{ 61{
62#ifdef GM20B_NETLIST_IMAGE_FW_NAME 62#ifdef GM20B_NETLIST_IMAGE_FW_NAME
63 return true; 63 return true;
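
gr_gm20b_is_firmware_defined() answers a build-time question at runtime: it returns true only when the netlist firmware image macro was defined at compile time. A minimal sketch of that pattern follows, with the made-up DEMO_NETLIST_IMAGE_FW_NAME standing in for GM20B_NETLIST_IMAGE_FW_NAME.

/* Sketch of a compile-time capability check exposed as a runtime
 * predicate. DEMO_NETLIST_IMAGE_FW_NAME is an illustrative stand-in. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_NETLIST_IMAGE_FW_NAME "demo_netlist.bin"

static bool demo_is_firmware_defined(void)
{
#ifdef DEMO_NETLIST_IMAGE_FW_NAME
	return true;
#else
	return false;
#endif
}

int main(void)
{
	printf("firmware defined: %s\n",
	       demo_is_firmware_defined() ? "yes" : "no");
	return 0;
}
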
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 835ff6bf..d40e9d52 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -342,7 +342,7 @@ static void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
342 } 342 }
343} 343}
344 344
345void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data) 345static void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
346{ 346{
347 struct gr_gk20a *gr = &g->gr; 347 struct gr_gk20a *gr = &g->gr;
348 u32 gpc_index, ppc_index, stride, val; 348 u32 gpc_index, ppc_index, stride, val;
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 574ad926..2b534816 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -34,7 +34,7 @@
34#define FUSE_OPT_PRIV_SEC_DIS_0 0x264 34#define FUSE_OPT_PRIV_SEC_DIS_0 0x264
35#define PRIV_SECURITY_DISABLE 0x01 35#define PRIV_SECURITY_DISABLE 0x01
36 36
37struct gpu_ops gm20b_ops = { 37static struct gpu_ops gm20b_ops = {
38 .clock_gating = { 38 .clock_gating = {
39 .slcg_bus_load_gating_prod = 39 .slcg_bus_load_gating_prod =
40 gm20b_slcg_bus_load_gating_prod, 40 gm20b_slcg_bus_load_gating_prod,
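
Making gm20b_ops static works because nothing outside hal_gm20b.c needs the symbol directly; other code reaches the gm20b implementations only through the function pointers installed in this ops table. The self-contained sketch below illustrates that ops-table pattern with demo_* types standing in for the real gpu_ops layout; it is not the driver's actual structure.

/* Illustrative ops-table (HAL) pattern: file-local functions wired up
 * through a table of function pointers. All demo_* names are invented. */
#include <stdbool.h>
#include <stdio.h>

struct demo_gpu;	/* forward declaration for the ops signatures */

struct demo_clock_gating_ops {
	void (*slcg_bus_load_gating_prod)(struct demo_gpu *g, bool prod);
};

struct demo_gpu_ops {
	struct demo_clock_gating_ops clock_gating;
};

struct demo_gpu {
	struct demo_gpu_ops ops;
};

/* static: only reachable through the ops table below. */
static void demo_slcg_bus_load_gating_prod(struct demo_gpu *g, bool prod)
{
	(void)g;
	printf("bus slcg -> %s\n", prod ? "prod" : "disable");
}

/* File-local ops instance, analogous to the static gm20b_ops above. */
static const struct demo_gpu_ops demo_ops = {
	.clock_gating = {
		.slcg_bus_load_gating_prod = demo_slcg_bus_load_gating_prod,
	},
};

int main(void)
{
	struct demo_gpu g = { .ops = demo_ops };

	g.ops.clock_gating.slcg_bus_load_gating_prod(&g, true);
	return 0;
}
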
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index a089b59c..10e3ba7f 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -197,7 +197,7 @@ static void gm20b_ltc_init_fs_state(struct gk20a *g)
197 gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg); 197 gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
198} 198}
199 199
200void gm20b_ltc_isr(struct gk20a *g) 200static void gm20b_ltc_isr(struct gk20a *g)
201{ 201{
202 u32 mc_intr, ltc_intr; 202 u32 mc_intr, ltc_intr;
203 int ltc, slice; 203 int ltc, slice;
@@ -266,7 +266,7 @@ static void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
266 "g_elpg_flush too many retries"); 266 "g_elpg_flush too many retries");
267} 267}
268 268
269u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base) 269static u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
270{ 270{
271 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r()); 271 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
272 if (val == 2) { 272 if (val == 2) {
@@ -282,7 +282,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
282/* 282/*
283 * Performs a full flush of the L2 cache. 283 * Performs a full flush of the L2 cache.
284 */ 284 */
285void gm20b_flush_ltc(struct gk20a *g) 285static void gm20b_flush_ltc(struct gk20a *g)
286{ 286{
287 u32 op_pending; 287 u32 op_pending;
288 unsigned long now, timeout; 288 unsigned long now, timeout;
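
The op_pending/timeout locals in gm20b_flush_ltc() and the "too many retries" message in gm20b_ltc_g_elpg_flush_locked() suggest the usual bounded-poll idiom: read a status register until a pending bit clears or a retry budget runs out. The function bodies are not shown in these hunks, so the sketch below only illustrates the idiom; the register offset, bit and helper names are invented.

/* Illustrative bounded-poll loop; demo_readl() fakes hardware that
 * completes after a few polls. Not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FLUSH_PENDING_BIT	(1u << 0)
#define DEMO_MAX_RETRIES	200

static int fake_polls;

/* Stand-in for a register read helper. */
static uint32_t demo_readl(uint32_t addr)
{
	(void)addr;
	return (++fake_polls < 5) ? DEMO_FLUSH_PENDING_BIT : 0;
}

static bool demo_flush_l2(void)
{
	uint32_t op_pending;
	int retries = DEMO_MAX_RETRIES;

	do {
		op_pending = demo_readl(0x70004 /* illustrative offset */);
		if (!(op_pending & DEMO_FLUSH_PENDING_BIT))
			return true;	/* flush completed */
	} while (--retries > 0);

	fprintf(stderr, "flush: too many retries\n");
	return false;
}

int main(void)
{
	return demo_flush_l2() ? 0 : 1;
}
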
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
index 13e7859f..030701b9 100644
--- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c
@@ -220,7 +220,7 @@ fail:
220 return ret; 220 return ret;
221} 221}
222 222
223void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr, 223static void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
224 u64 size, u32 pgsz_idx) { 224 u64 size, u32 pgsz_idx) {
225 u64 vaddr_hi; 225 u64 vaddr_hi;
226 u32 pde_lo, pde_hi, pde_i; 226 u32 pde_lo, pde_hi, pde_i;
@@ -253,14 +253,15 @@ void gm20b_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr,
253 return; 253 return;
254} 254}
255 255
256bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g) 256static bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g)
257{ 257{
258 u32 debug_ctrl = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r()); 258 u32 debug_ctrl = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r());
259 return gr_gpcs_pri_mmu_debug_ctrl_debug_v(debug_ctrl) == 259 return gr_gpcs_pri_mmu_debug_ctrl_debug_v(debug_ctrl) ==
260 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(); 260 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v();
261} 261}
262 262
263void gm20b_mm_set_big_page_size(struct gk20a *g, void *inst_ptr, int size) 263static void gm20b_mm_set_big_page_size(struct gk20a *g,
264 void *inst_ptr, int size)
264{ 265{
265 u32 val; 266 u32 val;
266 267
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 91927950..6a7f0d92 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -131,7 +131,7 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
131 { 0x0010e040, 0x00000000}, 131 { 0x0010e040, 0x00000000},
132}; 132};
133 133
134int gm20b_pmu_setup_elpg(struct gk20a *g) 134static int gm20b_pmu_setup_elpg(struct gk20a *g)
135{ 135{
136 int ret = 0; 136 int ret = 0;
137 u32 reg_writes; 137 u32 reg_writes;
@@ -153,7 +153,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
153 return ret; 153 return ret;
154} 154}
155 155
156void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg, 156static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
157 void *param, u32 handle, u32 status) 157 void *param, u32 handle, u32 status)
158{ 158{
159 gk20a_dbg_fn(""); 159 gk20a_dbg_fn("");
@@ -166,7 +166,7 @@ void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,
166} 166}
167 167
168 168
169int gm20b_pmu_init_acr(struct gk20a *g) 169static int gm20b_pmu_init_acr(struct gk20a *g)
170{ 170{
171 struct pmu_gk20a *pmu = &g->pmu; 171 struct pmu_gk20a *pmu = &g->pmu;
172 struct pmu_cmd cmd; 172 struct pmu_cmd cmd;
@@ -190,7 +190,7 @@ int gm20b_pmu_init_acr(struct gk20a *g)
190 return 0; 190 return 0;
191} 191}
192 192
193void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, 193static void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
194 void *param, u32 handle, u32 status) 194 void *param, u32 handle, u32 status)
195{ 195{
196 196
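
pmu_handle_acr_init_wpr_msg() and pmu_handle_fecs_boot_acr_msg() share the PMU completion-callback signature: the reply message, a caller-supplied param, the command handle, and a status code. The sketch below shows the shape of that command-plus-callback pattern in isolation; the demo_* types and the hand-rolled dispatch are invented and do not reflect how the PMU code actually routes replies.

/* Illustrative command-with-completion-callback shape. All names here
 * are stand-ins for illustration only. */
#include <stdint.h>
#include <stdio.h>

struct demo_msg { uint32_t id; };

typedef void (*demo_callback)(struct demo_msg *msg, void *param,
			      uint32_t handle, uint32_t status);

struct demo_pending_cmd {
	demo_callback cb;
	void *param;
	uint32_t handle;
};

/* Completion handler: checks the status reported with the reply. */
static void demo_handle_init_msg(struct demo_msg *msg, void *param,
				 uint32_t handle, uint32_t status)
{
	(void)msg;
	(void)param;
	if (status != 0)
		fprintf(stderr, "cmd %u failed: %u\n",
			(unsigned)handle, (unsigned)status);
	else
		printf("cmd %u acknowledged\n", (unsigned)handle);
}

int main(void)
{
	/* "Post" a command, then simulate the firmware's reply. */
	struct demo_pending_cmd cmd = {
		.cb = demo_handle_init_msg,
		.param = NULL,
		.handle = 1,
	};
	struct demo_msg reply = { .id = 42 };

	cmd.cb(&reply, cmd.param, cmd.handle, 0 /* status: success */);
	return 0;
}
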
diff --git a/drivers/gpu/nvgpu/gm20b/regops_gm20b.c b/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
index 71ccda37..cc1c72c2 100644
--- a/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/regops_gm20b.c
@@ -434,67 +434,68 @@ static const struct regop_offset_range gm20b_qctl_whitelist_ranges[] = {
434static const u32 gm20b_qctl_whitelist_ranges_count = 434static const u32 gm20b_qctl_whitelist_ranges_count =
435 ARRAY_SIZE(gm20b_qctl_whitelist_ranges); 435 ARRAY_SIZE(gm20b_qctl_whitelist_ranges);
436 436
437const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void) 437static const struct regop_offset_range *gm20b_get_global_whitelist_ranges(void)
438{ 438{
439 return gm20b_global_whitelist_ranges; 439 return gm20b_global_whitelist_ranges;
440} 440}
441 441
442int gm20b_get_global_whitelist_ranges_count(void) 442static int gm20b_get_global_whitelist_ranges_count(void)
443{ 443{
444 return gm20b_global_whitelist_ranges_count; 444 return gm20b_global_whitelist_ranges_count;
445} 445}
446 446
447const struct regop_offset_range *gm20b_get_context_whitelist_ranges(void) 447static const struct regop_offset_range *gm20b_get_context_whitelist_ranges(void)
448{ 448{
449 return gm20b_context_whitelist_ranges; 449 return gm20b_context_whitelist_ranges;
450} 450}
451 451
452int gm20b_get_context_whitelist_ranges_count(void) 452static int gm20b_get_context_whitelist_ranges_count(void)
453{ 453{
454 return gm20b_context_whitelist_ranges_count; 454 return gm20b_context_whitelist_ranges_count;
455} 455}
456 456
457const u32 *gm20b_get_runcontrol_whitelist(void) 457static const u32 *gm20b_get_runcontrol_whitelist(void)
458{ 458{
459 return gm20b_runcontrol_whitelist; 459 return gm20b_runcontrol_whitelist;
460} 460}
461 461
462int gm20b_get_runcontrol_whitelist_count(void) 462static int gm20b_get_runcontrol_whitelist_count(void)
463{ 463{
464 return gm20b_runcontrol_whitelist_count; 464 return gm20b_runcontrol_whitelist_count;
465} 465}
466 466
467const struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void) 467static const
468struct regop_offset_range *gm20b_get_runcontrol_whitelist_ranges(void)
468{ 469{
469 return gm20b_runcontrol_whitelist_ranges; 470 return gm20b_runcontrol_whitelist_ranges;
470} 471}
471 472
472int gm20b_get_runcontrol_whitelist_ranges_count(void) 473static int gm20b_get_runcontrol_whitelist_ranges_count(void)
473{ 474{
474 return gm20b_runcontrol_whitelist_ranges_count; 475 return gm20b_runcontrol_whitelist_ranges_count;
475} 476}
476 477
477const u32 *gm20b_get_qctl_whitelist(void) 478static const u32 *gm20b_get_qctl_whitelist(void)
478{ 479{
479 return gm20b_qctl_whitelist; 480 return gm20b_qctl_whitelist;
480} 481}
481 482
482int gm20b_get_qctl_whitelist_count(void) 483static int gm20b_get_qctl_whitelist_count(void)
483{ 484{
484 return gm20b_qctl_whitelist_count; 485 return gm20b_qctl_whitelist_count;
485} 486}
486 487
487const struct regop_offset_range *gm20b_get_qctl_whitelist_ranges(void) 488static const struct regop_offset_range *gm20b_get_qctl_whitelist_ranges(void)
488{ 489{
489 return gm20b_qctl_whitelist_ranges; 490 return gm20b_qctl_whitelist_ranges;
490} 491}
491 492
492int gm20b_get_qctl_whitelist_ranges_count(void) 493static int gm20b_get_qctl_whitelist_ranges_count(void)
493{ 494{
494 return gm20b_qctl_whitelist_ranges_count; 495 return gm20b_qctl_whitelist_ranges_count;
495} 496}
496 497
497int gm20b_apply_smpc_war(struct dbg_session_gk20a *dbg_s) 498static int gm20b_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
498{ 499{
499 /* Not needed on gm20b */ 500 /* Not needed on gm20b */
500 return 0; 501 return 0;