author    Konsta Holtta <kholtta@nvidia.com>     2014-09-26 09:05:41 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>  2015-03-18 15:11:34 -0400
commit    2d0bcfa3318c276dba5100510e59c5c2fa62957f (patch)
tree      8651c32920cddc94cf046c76e7ea6676d6e688e8
parent    79ab01debd382b91aecb474fc3cb51de78e2f1a7 (diff)
gpu: nvgpu: add __must_check to gk20a_busy
The return value of gk20a_busy must be checked since it may not succeed
in some cases. Add the __must_check attribute, which generates a compiler
warning for code that does not read the return value, and fix all uses of
the function to take error cases into account.

Bug 200040921

Change-Id: Ibc2b119985fa230324c88026fe94fc5f1894fe4f
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/542552
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.c       |  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c     |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c  |  5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c    |  9
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h          |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c    | 27
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c      | 24
7 files changed, 58 insertions, 17 deletions
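
For context, __must_check in the kernel's compiler headers expands to GCC's
warn_unused_result attribute, so after this change any caller that drops the
return value of gk20a_busy() gets a compile-time warning. Below is a minimal
standalone sketch of the mechanism, not code from this tree; power_on_device()
is a hypothetical stand-in for gk20a_busy().

/* Standalone sketch: __must_check is spelled out directly here, as it is
 * assumed to expand in the kernel's compiler headers.
 * Build with: gcc -Wall must_check_demo.c */
#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

static int __must_check power_on_device(void)
{
	return -1;	/* pretend runtime resume failed */
}

int main(void)
{
	int err;

	/* power_on_device();   <-- would now trigger a -Wunused-result warning */

	err = power_on_device();	/* the pattern this patch enforces */
	if (err) {
		fprintf(stderr, "failed to power on: %d\n", err);
		return 1;
	}
	return 0;
}
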
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index ed7602f0..5ca7c806 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -82,8 +82,12 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 	if (atomic_dec_return(&as_share->ref_cnt) > 0)
 		return 0;
 
-	gk20a_busy(g->dev);
+	err = gk20a_busy(g->dev);
+	if (err)
+		return err;
+
 	err = gk20a_vm_release_share(as_share);
+
 	gk20a_idle(g->dev);
 
 	release_as_share_id(as_share->as, as_share->id);
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index 746460c7..cd70d7c3 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -136,7 +136,7 @@ static int gk20a_ctrl_mark_compressible_write(
 		struct gk20a *g,
 		struct nvgpu_gpu_mark_compressible_write_args *args)
 {
-	int ret = 0;
+	int ret;
 
 	ret = gk20a_busy(g->dev);
 	if (ret)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 40751b88..6af54e68 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -571,7 +571,10 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 	    (g->dbg_powergating_disabled_refcount++ == 0)) {
 
 		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
-		gk20a_busy(g->dev);
+		err = gk20a_busy(g->dev);
+		if (err)
+			return err;
+
 		err = gk20a_busy(dbg_s->pdev);
 		if (err)
 			return -EPERM;
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index f41d883f..2cc3d38d 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -166,9 +166,14 @@ void gk20a_debug_show_dump(struct platform_device *pdev,
 	struct gk20a *g = platform->g;
 	struct fifo_gk20a *f = &g->fifo;
 	u32 chid;
-	int i;
+	int i, err;
+
+	err = gk20a_busy(g->dev);
+	if (err) {
+		gk20a_debug_output(o, "failed to power on gpu: %d\n", err);
+		return;
+	}
 
-	gk20a_busy(g->dev);
 	for (i = 0; i < fifo_pbdma_status__size_1_v(); i++) {
 		u32 status = gk20a_readl(g, fifo_pbdma_status_r(i));
 		u32 chan_status = fifo_pbdma_status_chan_status_v(status);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 6617f684..38322c87 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -703,7 +703,7 @@ void gk20a_create_sysfs(struct platform_device *dev);
 #define GK20A_SIM_IORESOURCE_MEM 2
 
 void gk20a_busy_noresume(struct platform_device *pdev);
-int gk20a_busy(struct platform_device *pdev);
+int __must_check gk20a_busy(struct platform_device *pdev);
 void gk20a_idle(struct platform_device *pdev);
 void gk20a_disable(struct gk20a *g, u32 units);
 void gk20a_enable(struct gk20a *g, u32 units);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index b23a1406..52a34086 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -45,11 +45,15 @@ static ssize_t elcg_enable_store(struct device *device,
 	struct platform_device *ndev = to_platform_device(device);
 	struct gk20a *g = get_gk20a(ndev);
 	unsigned long val = 0;
+	int err;
 
 	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
-	gk20a_busy(g->dev);
+	err = gk20a_busy(g->dev);
+	if (err)
+		return err;
+
 	if (val) {
 		g->elcg_enabled = true;
 		gr_gk20a_init_elcg_mode(g, ELCG_AUTO, ENGINE_GR_GK20A);
@@ -84,6 +88,7 @@ static ssize_t blcg_enable_store(struct device *device,
 	struct platform_device *ndev = to_platform_device(device);
 	struct gk20a *g = get_gk20a(ndev);
 	unsigned long val = 0;
+	int err;
 
 	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
@@ -93,7 +98,10 @@ static ssize_t blcg_enable_store(struct device *device,
 	else
 		g->blcg_enabled = false;
 
-	gk20a_busy(g->dev);
+	err = gk20a_busy(g->dev);
+	if (err)
+		return err;
+
 	if (g->ops.clock_gating.blcg_bus_load_gating_prod)
 		g->ops.clock_gating.blcg_bus_load_gating_prod(g, g->blcg_enabled);
 	if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod)
@@ -133,6 +141,7 @@ static ssize_t slcg_enable_store(struct device *device,
 	struct platform_device *ndev = to_platform_device(device);
 	struct gk20a *g = get_gk20a(ndev);
 	unsigned long val = 0;
+	int err;
 
 	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
@@ -147,7 +156,10 @@ static ssize_t slcg_enable_store(struct device *device,
 	 * init. Therefore, it would be incongruous to add it here. Once
 	 * it is added to init, we should add it here too.
 	 */
-	gk20a_busy(g->dev);
+	err = gk20a_busy(g->dev);
+	if (err)
+		return err;
+
 	if (g->ops.clock_gating.slcg_bus_load_gating_prod)
 		g->ops.clock_gating.slcg_bus_load_gating_prod(g, g->slcg_enabled);
 	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
@@ -305,11 +317,15 @@ static ssize_t gk20a_load_show(struct device *dev,
 	struct gk20a *g = get_gk20a(pdev);
 	u32 busy_time;
 	ssize_t res;
+	int err;
 
 	if (!g->power_on) {
 		busy_time = 0;
 	} else {
-		gk20a_busy(g->dev);
+		err = gk20a_busy(g->dev);
+		if (err)
+			return err;
+
 		gk20a_pmu_load_update(g);
 		gk20a_pmu_load_norm(g, &busy_time);
 		gk20a_idle(g->dev);
@@ -436,6 +452,9 @@ static ssize_t aelpg_enable_store(struct device *device,
 		return -EINVAL;
 
 	err = gk20a_busy(g->dev);
+	if (err)
+		return err;
+
 	if (g->pmu.pmu_ready) {
 		if (val && !g->aelpg_enabled) {
 			g->aelpg_enabled = true;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index f7af9035..c4872ab8 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -3721,13 +3721,12 @@ int gk20a_pmu_load_update(struct gk20a *g)
 void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
 		u32 *total_cycles)
 {
-	if (!g->power_on) {
+	if (!g->power_on || gk20a_busy(g->dev)) {
 		*busy_cycles = 0;
 		*total_cycles = 0;
 		return;
 	}
 
-	gk20a_busy(g->dev);
 	*busy_cycles = pwr_pmu_idle_count_value_v(
 		gk20a_readl(g, pwr_pmu_idle_count_r(1)));
 	rmb();
@@ -3740,10 +3739,9 @@ void gk20a_pmu_reset_load_counters(struct gk20a *g)
 {
 	u32 reg_val = pwr_pmu_idle_count_reset_f(1);
 
-	if (!g->power_on)
+	if (!g->power_on || gk20a_busy(g->dev))
 		return;
 
-	gk20a_busy(g->dev);
 	gk20a_writel(g, pwr_pmu_idle_count_r(2), reg_val);
 	wmb();
 	gk20a_writel(g, pwr_pmu_idle_count_r(1), reg_val);
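
The two hunks above fold the busy call into the existing power_on check:
because C's || operator short-circuits, gk20a_busy() is only attempted when
the GPU is already powered, and a nonzero (error) return takes the same early
exit as the powered-off case. The following standalone sketch shows the same
control flow; powered_on, power_on_device() and power_off_device() are
hypothetical stand-ins, not nvgpu symbols.

/* Sketch of the short-circuit early-exit pattern; build with gcc -Wall. */
#include <stdbool.h>
#include <stdio.h>

static bool powered_on = true;

static int power_on_device(void)
{
	return 0;	/* 0 on success, nonzero on failure, like gk20a_busy() */
}

static void power_off_device(void)
{
	/* drop the busy reference, like gk20a_idle() */
}

static void get_counters(unsigned int *busy_cycles, unsigned int *total_cycles)
{
	if (!powered_on || power_on_device() != 0) {
		*busy_cycles = 0;
		*total_cycles = 0;
		return;
	}

	*busy_cycles = 123;	/* stand-in for reading hardware counters */
	*total_cycles = 456;
	power_off_device();
}

int main(void)
{
	unsigned int busy, total;

	get_counters(&busy, &total);
	printf("busy=%u total=%u\n", busy, total);
	return 0;
}
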
@@ -3929,10 +3927,14 @@ static int elpg_residency_show(struct seq_file *s, void *data)
 	u32 ungating_time = 0;
 	u32 gating_cnt;
 	u64 total_ingating, total_ungating, residency, divisor, dividend;
+	int err;
 
 	/* Don't unnecessarily power on the device */
 	if (g->power_on) {
-		gk20a_busy(g->dev);
+		err = gk20a_busy(g->dev);
+		if (err)
+			return err;
+
 		gk20a_pmu_get_elpg_residency_gating(g, &ingating_time,
 			&ungating_time, &gating_cnt);
 		gk20a_idle(g->dev);
@@ -3974,9 +3976,13 @@ static int elpg_transitions_show(struct seq_file *s, void *data)
 	struct gk20a *g = s->private;
 	u32 ingating_time, ungating_time, total_gating_cnt;
 	u32 gating_cnt = 0;
+	int err;
 
 	if (g->power_on) {
-		gk20a_busy(g->dev);
+		err = gk20a_busy(g->dev);
+		if (err)
+			return err;
+
 		gk20a_pmu_get_elpg_residency_gating(g, &ingating_time,
 			&ungating_time, &gating_cnt);
 		gk20a_idle(g->dev);
@@ -4066,6 +4072,7 @@ static ssize_t perfmon_events_enable_write(struct file *file,
 	unsigned long val = 0;
 	char buf[40];
 	int buf_size;
+	int err;
 
 	memset(buf, 0, sizeof(buf));
 	buf_size = min(count, (sizeof(buf)-1));
@@ -4078,7 +4085,10 @@ static ssize_t perfmon_events_enable_write(struct file *file,
 
 	/* Don't turn on gk20a unnecessarily */
 	if (g->power_on) {
-		gk20a_busy(g->dev);
+		err = gk20a_busy(g->dev);
+		if (err)
+			return err;
+
 		if (val && !g->pmu.perfmon_sampling_enabled) {
 			g->pmu.perfmon_sampling_enabled = true;
 			pmu_perfmon_start_sampling(&(g->pmu));