author	Amulya <Amurthyreddy@nvidia.com>	2018-08-29 07:09:46 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 23:40:03 -0400
commit	cf7850ee33a5a9ffc32f584c7c3beefe286ceed2 (patch)
tree	eaa6af1806dd3242857d41efe427f0240d7e5310 /drivers/gpu/nvgpu/common/fifo
parent	2eface802a4aea417206bcdda689a65cf47d300b (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I91baa5cf1d38081161336bde5fbc06661b741273
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807133
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/fifo')
-rw-r--r--	drivers/gpu/nvgpu/common/fifo/channel.c	57
1 file changed, 29 insertions, 28 deletions
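For context before the diff: MISRA C:2012 Rule 10.1 restricts operands to appropriate essential types, so a pointer or integer may not stand in for a boolean as the operand of !, &&, || or as an if condition. The sketch below is illustrative only and is not part of this commit (count, ptr and do_work are hypothetical names); it shows the explicit-comparison pattern this patch applies throughout channel.c:

#include <stddef.h>

static void do_work(void) { }

/* Illustrative sketch, not from this commit. */
static void example(unsigned int count, void *ptr)
{
	/* Non-compliant with Rule 10.1: integer and pointer used as booleans. */
	if (count && !ptr) {
		do_work();
	}

	/* Compliant: each comparison yields an essentially boolean result. */
	if ((count != 0U) && (ptr == NULL)) {
		do_work();
	}
}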
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 7c2bd4b4..a11d9562 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -75,13 +75,13 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 		ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
 				free_chs);
 		nvgpu_list_del(&ch->free_chs);
-		WARN_ON(nvgpu_atomic_read(&ch->ref_count));
+		WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
 		WARN_ON(ch->referenceable);
 		f->used_channels++;
 	}
 	nvgpu_mutex_release(&f->free_chs_mutex);
 
-	if (g->aggressive_sync_destroy_thresh &&
+	if ((g->aggressive_sync_destroy_thresh != 0U) &&
 			(f->used_channels >
 			 g->aggressive_sync_destroy_thresh)) {
 		g->aggressive_sync_destroy = true;
@@ -108,7 +108,7 @@ static void free_channel(struct fifo_gk20a *f,
 	 * this is fine then because no new channels would be created.
 	 */
 	if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
-		if (g->aggressive_sync_destroy_thresh &&
+		if ((g->aggressive_sync_destroy_thresh != 0U) &&
 				(f->used_channels <
 				 g->aggressive_sync_destroy_thresh)) {
 			g->aggressive_sync_destroy = false;
@@ -251,7 +251,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 		}
 
 		nvgpu_usleep_range(1000, 3000);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (!channel_idle) {
 		nvgpu_err(ch->g, "jobs not freed for channel %d",
@@ -469,7 +469,7 @@ unbind:
 	ch->vpr = false;
 	ch->vm = NULL;
 
-	WARN_ON(ch->sync);
+	WARN_ON(ch->sync != NULL);
 
 	/* unlink all debug sessions */
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -799,7 +799,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 
 	nvgpu_log_fn(c->g, "size %d", orig_size);
 
-	if (!e) {
+	if (e == NULL) {
 		nvgpu_err(c->g,
 			"ch %d: priv cmd entry is null",
 			c->chid);
@@ -889,7 +889,7 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 	} else {
 		*job_out = nvgpu_kzalloc(c->g,
 				sizeof(struct channel_gk20a_job));
-		if (!*job_out) {
+		if (*job_out == NULL) {
 			err = -ENOMEM;
 		}
 	}
@@ -1004,7 +1004,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	size_t size;
 	struct priv_cmd_entry *entries = NULL;
 
-	if (channel_gk20a_is_prealloc_enabled(c) || !num_jobs) {
+	if ((channel_gk20a_is_prealloc_enabled(c)) || (num_jobs == 0U)) {
 		return -EINVAL;
 	}
 
@@ -1018,7 +1018,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 		c->joblist.pre_alloc.jobs = nvgpu_vzalloc(c->g,
 				num_jobs * size);
 	}
-	if (!c->joblist.pre_alloc.jobs) {
+	if (c->joblist.pre_alloc.jobs == NULL) {
 		err = -ENOMEM;
 		goto clean_up;
 	}
@@ -1032,7 +1032,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	if (num_jobs <= ULONG_MAX / (size << 1)) {
 		entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
 	}
-	if (!entries) {
+	if (entries == NULL) {
 		err = -ENOMEM;
 		goto clean_up_joblist;
 	}
@@ -1172,7 +1172,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
 		c->gpfifo.pipe = nvgpu_big_malloc(g,
 				gpfifo_size * gpfifo_entry_size);
-		if (!c->gpfifo.pipe) {
+		if (c->gpfifo.pipe == NULL) {
 			err = -ENOMEM;
 			goto clean_up_unmap;
 		}
@@ -1188,10 +1188,10 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	g->ops.fifo.setup_userd(c);
 
-	if (!g->aggressive_sync_destroy_thresh) {
+	if (g->aggressive_sync_destroy_thresh == 0U) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		c->sync = gk20a_channel_sync_create(c, false);
-		if (!c->sync) {
+		if (c->sync == NULL) {
 			err = -ENOMEM;
 			nvgpu_mutex_release(&c->sync_lock);
 			goto clean_up_unmap;
@@ -1433,7 +1433,7 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -1483,7 +1483,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 		return;
 	}
 
-	if (!nvgpu_timeout_peek_expired(&ch->timeout.timer)) {
+	if (nvgpu_timeout_peek_expired(&ch->timeout.timer) == 0) {
 		/* Seems stuck but waiting to time out */
 		return;
 	}
@@ -1637,7 +1637,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 	}
 	nvgpu_spinlock_release(&g->channel_worker.items_lock);
 
-	if (!ch) {
+	if (ch == NULL) {
 		/*
 		 * Woke up for some other reason, but there are no
 		 * other reasons than a channel added in the items list
@@ -1776,7 +1776,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	/*
 	 * Warn if worker thread cannot run
 	 */
-	if (WARN_ON(__nvgpu_channel_worker_start(g))) {
+	if (WARN_ON(__nvgpu_channel_worker_start(g) != 0)) {
 		nvgpu_warn(g, "channel worker cannot run!");
 		return;
 	}
@@ -1788,7 +1788,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	 * the time we end up here (e.g., if the client got killed); if so, just
 	 * return.
 	 */
-	if (!gk20a_channel_get(ch)) {
+	if (gk20a_channel_get(ch) == NULL) {
 		nvgpu_info(g, "cannot get ch ref for worker!");
 		return;
 	}
@@ -1814,7 +1814,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 	struct gk20a *g = c->g;
 
-	if (!e) {
+	if (e == NULL) {
 		return 0;
 	}
 
@@ -1906,11 +1906,11 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 	struct vm_gk20a *vm;
 	struct channel_gk20a_job *job;
 	struct gk20a *g;
-	int job_finished = 0;
+	bool job_finished = false;
 	bool watchdog_on = false;
 
 	c = gk20a_channel_get(c);
-	if (!c) {
+	if (c == NULL) {
 		return;
 	}
 
@@ -1970,9 +1970,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 			break;
 		}
 
-		WARN_ON(!c->sync);
+		WARN_ON(c->sync == NULL);
 
-		if (c->sync) {
+		if (c->sync != NULL) {
 			if (c->has_os_fence_framework_support &&
 				g->os_channel.os_fence_framework_inst_exists(c)) {
 				g->os_channel.signal_os_fence_framework(c);
@@ -2024,7 +2024,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 		nvgpu_smp_wmb();
 
 		channel_gk20a_free_job(c, job);
-		job_finished = 1;
+		job_finished = true;
 
 		/*
 		 * Deterministic channels have a channel-wide power reference;
@@ -2042,7 +2042,8 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	nvgpu_mutex_release(&c->joblist.cleanup_lock);
 
-	if (job_finished && g->os_channel.work_completion_signal) {
+	if ((job_finished) &&
+			(g->os_channel.work_completion_signal != NULL)) {
 		g->os_channel.work_completion_signal(c);
 	}
 
@@ -2089,7 +2090,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -2127,7 +2128,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -2237,7 +2238,7 @@ int gk20a_channel_suspend(struct gk20a *g)
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
-		if (gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) != NULL) {
 			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
 			gk20a_disable_channel_tsg(g, ch);