author		Amulya <Amurthyreddy@nvidia.com>	2018-08-29 07:09:46 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 23:40:03 -0400
commit		cf7850ee33a5a9ffc32f584c7c3beefe286ceed2 (patch)
tree		eaa6af1806dd3242857d41efe427f0240d7e5310
parent		2eface802a4aea417206bcdda689a65cf47d300b (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I91baa5cf1d38081161336bde5fbc06661b741273
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807133
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
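The fix pattern is mechanical throughout the diff below: every controlling
expression flagged by MISRA C:2012 Rule 10.1 gets an explicit comparison
against the zero value of its own type (NULL for pointers, 0U for unsigned
scalars, 0ULL for u64 values), and int flags become bool. As a minimal
sketch of the rule only — the struct, field names, and function below are
hypothetical, not code from this patch:

	/* Illustrative sketch only; types and names are hypothetical. */
	#include <stdbool.h>
	#include <stddef.h>

	struct ctx {
		unsigned int thresh;	/* arithmetic value, not a flag */
		void *sync;		/* pointer, not a flag */
		bool enabled;		/* essentially boolean type */
	};

	static bool ctx_ready(const struct ctx *c)
	{
		/* Non-compliant: if (c && c->thresh && c->sync && c->enabled) */

		/*
		 * Compliant: each non-boolean operand is compared explicitly;
		 * only the bool member may be used as a boolean directly.
		 */
		return (c != NULL) && (c->thresh != 0U) &&
		       (c->sync != NULL) && c->enabled;
	}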
-rw-r--r--	drivers/gpu/nvgpu/common/fifo/channel.c			57
-rw-r--r--	drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c	2
-rw-r--r--	drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c	4
-rw-r--r--	drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c		4
-rw-r--r--	drivers/gpu/nvgpu/common/rbtree.c			34
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c			30
-rw-r--r--	drivers/gpu/nvgpu/common/therm/therm_gp106.c		4
-rw-r--r--	drivers/gpu/nvgpu/common/vbios/bios.c			6
-rw-r--r--	drivers/gpu/nvgpu/common/xve/xve_gp106.c		10
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/gmmu.h			2
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/list.h			3
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/semaphore.h		4
12 files changed, 82 insertions, 78 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index 7c2bd4b4..a11d9562 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -75,13 +75,13 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 		ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
 				free_chs);
 		nvgpu_list_del(&ch->free_chs);
-		WARN_ON(nvgpu_atomic_read(&ch->ref_count));
+		WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
 		WARN_ON(ch->referenceable);
 		f->used_channels++;
 	}
 	nvgpu_mutex_release(&f->free_chs_mutex);
 
-	if (g->aggressive_sync_destroy_thresh &&
+	if ((g->aggressive_sync_destroy_thresh != 0U) &&
 		(f->used_channels >
 		 g->aggressive_sync_destroy_thresh)) {
 		g->aggressive_sync_destroy = true;
@@ -108,7 +108,7 @@ static void free_channel(struct fifo_gk20a *f,
 	 * this is fine then because no new channels would be created.
 	 */
 	if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
-		if (g->aggressive_sync_destroy_thresh &&
+		if ((g->aggressive_sync_destroy_thresh != 0U) &&
 			(f->used_channels <
 			 g->aggressive_sync_destroy_thresh)) {
 			g->aggressive_sync_destroy = false;
@@ -251,7 +251,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 		}
 
 		nvgpu_usleep_range(1000, 3000);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (!channel_idle) {
 		nvgpu_err(ch->g, "jobs not freed for channel %d",
@@ -469,7 +469,7 @@ unbind:
 	ch->vpr = false;
 	ch->vm = NULL;
 
-	WARN_ON(ch->sync);
+	WARN_ON(ch->sync != NULL);
 
 	/* unlink all debug sessions */
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -799,7 +799,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
 
 	nvgpu_log_fn(c->g, "size %d", orig_size);
 
-	if (!e) {
+	if (e == NULL) {
 		nvgpu_err(c->g,
 			"ch %d: priv cmd entry is null",
 			c->chid);
@@ -889,7 +889,7 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 	} else {
 		*job_out = nvgpu_kzalloc(c->g,
 				sizeof(struct channel_gk20a_job));
-		if (!*job_out) {
+		if (*job_out == NULL) {
 			err = -ENOMEM;
 		}
 	}
@@ -1004,7 +1004,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	size_t size;
 	struct priv_cmd_entry *entries = NULL;
 
-	if (channel_gk20a_is_prealloc_enabled(c) || !num_jobs) {
+	if ((channel_gk20a_is_prealloc_enabled(c)) || (num_jobs == 0U)) {
 		return -EINVAL;
 	}
 
@@ -1018,7 +1018,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 		c->joblist.pre_alloc.jobs = nvgpu_vzalloc(c->g,
 				num_jobs * size);
 	}
-	if (!c->joblist.pre_alloc.jobs) {
+	if (c->joblist.pre_alloc.jobs == NULL) {
 		err = -ENOMEM;
 		goto clean_up;
 	}
@@ -1032,7 +1032,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 	if (num_jobs <= ULONG_MAX / (size << 1)) {
 		entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
 	}
-	if (!entries) {
+	if (entries == NULL) {
 		err = -ENOMEM;
 		goto clean_up_joblist;
 	}
@@ -1172,7 +1172,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 	if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
 		c->gpfifo.pipe = nvgpu_big_malloc(g,
 				gpfifo_size * gpfifo_entry_size);
-		if (!c->gpfifo.pipe) {
+		if (c->gpfifo.pipe == NULL) {
 			err = -ENOMEM;
 			goto clean_up_unmap;
 		}
@@ -1188,10 +1188,10 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	g->ops.fifo.setup_userd(c);
 
-	if (!g->aggressive_sync_destroy_thresh) {
+	if (g->aggressive_sync_destroy_thresh == 0U) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		c->sync = gk20a_channel_sync_create(c, false);
-		if (!c->sync) {
+		if (c->sync == NULL) {
 			err = -ENOMEM;
 			nvgpu_mutex_release(&c->sync_lock);
 			goto clean_up_unmap;
@@ -1433,7 +1433,7 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -1483,7 +1483,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 		return;
 	}
 
-	if (!nvgpu_timeout_peek_expired(&ch->timeout.timer)) {
+	if (nvgpu_timeout_peek_expired(&ch->timeout.timer) == 0) {
 		/* Seems stuck but waiting to time out */
 		return;
 	}
@@ -1637,7 +1637,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 	}
 	nvgpu_spinlock_release(&g->channel_worker.items_lock);
 
-	if (!ch) {
+	if (ch == NULL) {
 		/*
 		 * Woke up for some other reason, but there are no
 		 * other reasons than a channel added in the items list
@@ -1776,7 +1776,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	/*
 	 * Warn if worker thread cannot run
 	 */
-	if (WARN_ON(__nvgpu_channel_worker_start(g))) {
+	if (WARN_ON(__nvgpu_channel_worker_start(g) != 0)) {
 		nvgpu_warn(g, "channel worker cannot run!");
 		return;
 	}
@@ -1788,7 +1788,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	 * the time we end up here (e.g., if the client got killed); if so, just
 	 * return.
 	 */
-	if (!gk20a_channel_get(ch)) {
+	if (gk20a_channel_get(ch) == NULL) {
 		nvgpu_info(g, "cannot get ch ref for worker!");
 		return;
 	}
@@ -1814,7 +1814,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
 	struct gk20a *g = c->g;
 
-	if (!e) {
+	if (e == NULL) {
 		return 0;
 	}
 
@@ -1906,11 +1906,11 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 	struct vm_gk20a *vm;
 	struct channel_gk20a_job *job;
 	struct gk20a *g;
-	int job_finished = 0;
+	bool job_finished = false;
 	bool watchdog_on = false;
 
 	c = gk20a_channel_get(c);
-	if (!c) {
+	if (c == NULL) {
 		return;
 	}
 
@@ -1970,9 +1970,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 			break;
 		}
 
-		WARN_ON(!c->sync);
+		WARN_ON(c->sync == NULL);
 
-		if (c->sync) {
+		if (c->sync != NULL) {
 			if (c->has_os_fence_framework_support &&
 				g->os_channel.os_fence_framework_inst_exists(c)) {
 				g->os_channel.signal_os_fence_framework(c);
@@ -2024,7 +2024,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 		nvgpu_smp_wmb();
 
 		channel_gk20a_free_job(c, job);
-		job_finished = 1;
+		job_finished = true;
 
 		/*
 		 * Deterministic channels have a channel-wide power reference;
@@ -2042,7 +2042,8 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	nvgpu_mutex_release(&c->joblist.cleanup_lock);
 
-	if (job_finished && g->os_channel.work_completion_signal) {
+	if ((job_finished) &&
+	    (g->os_channel.work_completion_signal != NULL)) {
 		g->os_channel.work_completion_signal(c);
 	}
 
@@ -2089,7 +2090,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -2127,7 +2128,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
 
-		if (!gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) == NULL) {
 			continue;
 		}
 
@@ -2237,7 +2238,7 @@ int gk20a_channel_suspend(struct gk20a *g)
 
 	for (chid = 0; chid < f->num_channels; chid++) {
 		struct channel_gk20a *ch = &f->channel[chid];
-		if (gk20a_channel_get(ch)) {
+		if (gk20a_channel_get(ch) != NULL) {
 			nvgpu_log_info(g, "suspend channel %d", chid);
 			/* disable channel */
 			gk20a_disable_channel_tsg(g, ch);
diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
index c169115e..24c35576 100644
--- a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
@@ -98,7 +98,7 @@ void gm20b_priv_ring_isr(struct gk20a *g)
 	/* poll for clear interrupt done */
 	cmd = pri_ringmaster_command_cmd_v(
 		gk20a_readl(g, pri_ringmaster_command_r()));
-	while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && retry) {
+	while ((cmd != pri_ringmaster_command_cmd_no_cmd_v()) && (retry != 0)) {
 		nvgpu_udelay(20);
 		retry--;
 		cmd = pri_ringmaster_command_cmd_v(
diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
index 53141c9a..f8a136c6 100644
--- a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gp10b.c
@@ -181,7 +181,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
 			}
 
 			status1 = status1 & (~(BIT(gpc)));
-			if (!status1) {
+			if (status1 == 0U) {
 				break;
 			}
 		}
@@ -196,7 +196,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
 	/* poll for clear interrupt done */
 	cmd = pri_ringmaster_command_cmd_v(
 		gk20a_readl(g, pri_ringmaster_command_r()));
-	while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && retry) {
+	while ((cmd != pri_ringmaster_command_cmd_no_cmd_v()) && (retry != 0)) {
 		nvgpu_udelay(20);
 		cmd = pri_ringmaster_command_cmd_v(
 			gk20a_readl(g, pri_ringmaster_command_r()));
diff --git a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
index a9c971df..eeb6e460 100644
--- a/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
+++ b/drivers/gpu/nvgpu/common/ptimer/ptimer_gk20a.c
@@ -46,7 +46,7 @@ void gk20a_ptimer_isr(struct gk20a *g)
 		nvgpu_err(g, "PRI timeout: ADR 0x%08x "
 			"%s DATA 0x%08x",
 			timer_pri_timeout_save_0_addr_v(save0) << 2,
-			timer_pri_timeout_save_0_write_v(save0) ?
+			(timer_pri_timeout_save_0_write_v(save0) != 0U) ?
 			"WRITE" : "READ", save1);
 
 	gk20a_writel(g, timer_pri_timeout_save_0_r(), 0);
@@ -67,7 +67,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
 	unsigned int i = 0;
 	u32 gpu_timestamp_hi_prev = 0;
 
-	if (!value) {
+	if (value == NULL) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/nvgpu/common/rbtree.c b/drivers/gpu/nvgpu/common/rbtree.c
index a0e97ee9..33735a4f 100644
--- a/drivers/gpu/nvgpu/common/rbtree.c
+++ b/drivers/gpu/nvgpu/common/rbtree.c
@@ -96,7 +96,7 @@ static void insert_fixup(struct nvgpu_rbtree_node **root,
 		if (x->parent == x->parent->parent->left) {
 			struct nvgpu_rbtree_node *y = x->parent->parent->right;
 
-			if (y && y->is_red) {
+			if ((y != NULL) && (y->is_red)) {
 				/* uncle is RED */
 				x->parent->is_red = false;
 				y->is_red = false;
@@ -119,7 +119,7 @@ static void insert_fixup(struct nvgpu_rbtree_node **root,
 			/* mirror image of above code */
 			struct nvgpu_rbtree_node *y = x->parent->parent->left;
 
-			if (y && y->is_red) {
+			if ((y != NULL) && (y->is_red)) {
 				/* uncle is RED */
 				x->parent->is_red = false;
 				y->is_red = false;
@@ -189,7 +189,7 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 			struct nvgpu_rbtree_node *parent_of_x,
 			struct nvgpu_rbtree_node *x)
 {
-	while ((x != *root) && (!x || !x->is_red)) {
+	while ((x != *root) && ((x == NULL) || (!x->is_red))) {
 		/*
 		 * NULL nodes are sentinel nodes. If we delete a sentinel
 		 * node (x==NULL) it must have a parent node (or be the root).
@@ -200,21 +200,21 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 		if ((parent_of_x != NULL) && (x == parent_of_x->left)) {
 			struct nvgpu_rbtree_node *w = parent_of_x->right;
 
-			if (w && w->is_red) {
+			if ((w != NULL) && (w->is_red)) {
 				w->is_red = false;
 				parent_of_x->is_red = true;
 				rotate_left(root, parent_of_x);
 				w = parent_of_x->right;
 			}
 
-			if (!w || ((!w->left || !w->left->is_red)
-				   && (!w->right || !w->right->is_red))) {
-				if (w) {
+			if ((w == NULL) || (((w->left == NULL) || (!w->left->is_red)) &&
+					((w->right == NULL) || (!w->right->is_red)))) {
+				if (w != NULL) {
 					w->is_red = true;
 				}
 				x = parent_of_x;
 			} else {
-				if (!w->right || !w->right->is_red) {
+				if ((w->right == NULL) || (!w->right->is_red)) {
 					w->left->is_red = false;
 					w->is_red = true;
 					rotate_right(root, w);
@@ -229,21 +229,21 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 		} else if (parent_of_x != NULL) {
 			struct nvgpu_rbtree_node *w = parent_of_x->left;
 
-			if (w && w->is_red) {
+			if ((w != NULL) && (w->is_red)) {
 				w->is_red = false;
 				parent_of_x->is_red = true;
 				rotate_right(root, parent_of_x);
 				w = parent_of_x->left;
 			}
 
-			if (!w || ((!w->right || !w->right->is_red)
-				   && (!w->left || !w->left->is_red))) {
-				if (w) {
+			if ((w == NULL) || (((w->right == NULL) || (!w->right->is_red)) &&
+					((w->left == NULL) || (!w->left->is_red)))) {
+				if (w != NULL) {
 					w->is_red = true;
 				}
 				x = parent_of_x;
 			} else {
-				if (!w->left || !w->left->is_red) {
+				if ((w->left == NULL) || (!w->left->is_red)) {
 					w->right->is_red = false;
 					w->is_red = true;
 					rotate_left(root, w);
@@ -259,7 +259,7 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 		parent_of_x = x->parent;
 	}
 
-	if (x) {
+	if (x != NULL) {
 		x->is_red = false;
 	}
 }
@@ -276,7 +276,7 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 	z = node;
 
 	/* unlink */
-	if (!z->left || !z->right) {
+	if ((z->left == NULL) || (z->right == NULL)) {
 		/* y has a SENTINEL node as a child */
 		y = z;
 	} else {
@@ -296,7 +296,7 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 
 	/* remove y from the parent chain */
 	parent_of_x = y->parent;
-	if (x) {
+	if (x != NULL) {
 		x->parent = parent_of_x;
 	}
 
@@ -431,7 +431,7 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
 {
 	struct nvgpu_rbtree_node *curr = NULL;
 
-	if (root && *node) {
+	if ((root != NULL) && (*node != NULL)) {
 		/* if we don't have a right subtree return the parent */
 		curr = *node;
 
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 5814a737..39852273 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -90,7 +90,7 @@ out:
 
 void nvgpu_semaphore_sea_destroy(struct gk20a *g)
 {
-	if (!g->sema_sea) {
+	if (g->sema_sea == NULL) {
 		return;
 	}
 
@@ -111,7 +111,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 	}
 
 	g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
-	if (!g->sema_sea) {
+	if (g->sema_sea == NULL) {
 		return NULL;
 	}
 
@@ -163,7 +163,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
 	int ret;
 
 	p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
-	if (!p) {
+	if (p == NULL) {
 		return -ENOMEM;
 	}
 
@@ -234,13 +234,13 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 			p->sema_sea->map_size,
 			0, gk20a_mem_flag_read_only, 0,
 			p->sema_sea->sea_mem.aperture);
-	if (!addr) {
+	if (addr == 0ULL) {
 		err = -ENOMEM;
 		goto fail_unlock;
 	}
 
 	p->gpu_va_ro = addr;
-	p->mapped = 1;
+	p->mapped = true;
 
 	gpu_sema_dbg(pool_to_gk20a(p),
 		     " %d: GPU read-only VA = 0x%llx",
@@ -262,7 +262,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 			gk20a_mem_flag_none, 0,
 			p->rw_mem.aperture);
 
-	if (!addr) {
+	if (addr == 0ULL) {
 		err = -ENOMEM;
 		goto fail_free_submem;
 	}
@@ -305,7 +305,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 
 	p->gpu_va = 0;
 	p->gpu_va_ro = 0;
-	p->mapped = 0;
+	p->mapped = false;
 
 	__unlock_sema_sea(p->sema_sea);
 
@@ -324,7 +324,9 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 	struct nvgpu_semaphore_sea *s = p->sema_sea;
 
 	/* Freeing a mapped pool is a bad idea. */
-	WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro);
+	WARN_ON((p->mapped) ||
+		(p->gpu_va != 0ULL) ||
+		(p->gpu_va_ro != 0ULL));
 
 	__lock_sema_sea(s);
 	nvgpu_list_del(&p->pool_list_entry);
@@ -370,7 +372,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
 	int current_value;
 
-	BUG_ON(!p);
+	BUG_ON(p == NULL);
 
 	nvgpu_mutex_acquire(&p->pool_lock);
 
@@ -383,7 +385,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 	}
 
 	hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int));
-	if (!hw_sema) {
+	if (hw_sema == NULL) {
 		ret = -ENOMEM;
 		goto fail_free_idx;
 	}
@@ -416,7 +418,7 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 	struct nvgpu_semaphore_int *hw_sema = ch->hw_sema;
 	int idx = hw_sema->location.offset / SEMAPHORE_SIZE;
 
-	BUG_ON(!p);
+	BUG_ON(p == NULL);
 
 	nvgpu_mutex_acquire(&p->pool_lock);
 
@@ -439,7 +441,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 	struct nvgpu_semaphore *s;
 	int ret;
 
-	if (!ch->hw_sema) {
+	if (ch->hw_sema == NULL) {
 		ret = __nvgpu_init_hw_sema(ch);
 		if (ret) {
 			return NULL;
@@ -447,7 +449,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 	}
 
 	s = nvgpu_kzalloc(ch->g, sizeof(*s));
-	if (!s) {
+	if (s == NULL) {
 		return NULL;
 	}
 
@@ -619,7 +621,7 @@ void nvgpu_semaphore_prepare(struct nvgpu_semaphore *s,
 	WARN_ON(s->incremented);
 
 	nvgpu_atomic_set(&s->value, next);
-	s->incremented = 1;
+	s->incremented = true;
 
 	gpu_sema_verbose_dbg(s->g, "INCR sema for c=%d (%u)",
 			     hw_sema->ch->chid, next);
diff --git a/drivers/gpu/nvgpu/common/therm/therm_gp106.c b/drivers/gpu/nvgpu/common/therm/therm_gp106.c
index cc3127d7..b22cefeb 100644
--- a/drivers/gpu/nvgpu/common/therm/therm_gp106.c
+++ b/drivers/gpu/nvgpu/common/therm/therm_gp106.c
@@ -44,8 +44,8 @@ int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8)
 
 	readval = gk20a_readl(g, therm_temp_sensor_tsense_r());
 
-	if (!(therm_temp_sensor_tsense_state_v(readval) &
-	      therm_temp_sensor_tsense_state_valid_v())) {
+	if ((therm_temp_sensor_tsense_state_v(readval) &
+	     therm_temp_sensor_tsense_state_valid_v()) == 0U) {
 		nvgpu_err(g,
 			"Attempt to read temperature while sensor is OFF!");
 		err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/vbios/bios.c b/drivers/gpu/nvgpu/common/vbios/bios.c
index fc82c2e9..4e851b39 100644
--- a/drivers/gpu/nvgpu/common/vbios/bios.c
+++ b/drivers/gpu/nvgpu/common/vbios/bios.c
@@ -291,7 +291,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 	bool found = false;
 	unsigned int i;
 
-	while (!last) {
+	while (last == 0) {
 		struct pci_exp_rom *pci_rom;
 		struct pci_data_struct *pci_data;
 		struct pci_ext_data_struct *pci_ext_data;
@@ -790,7 +790,7 @@ s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
 {
 	u32 val;
 	val = __nvgpu_bios_readbyte(g, offset);
-	val = val & 0x80U ? (val | ~0xffU) : val;
+	val = ((val & 0x80U) != 0U) ? (val | ~0xffU) : val;
 
 	return (s8) val;
 }
@@ -827,7 +827,7 @@ static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condi
 
 	strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xfU;
 
-	index = g->bios.mem_strap_xlat_tbl_ptr ?
+	index = (g->bios.mem_strap_xlat_tbl_ptr != 0U) ?
 		nvgpu_bios_read_u8(g, g->bios.mem_strap_xlat_tbl_ptr +
 				   strap) : strap;
 
diff --git a/drivers/gpu/nvgpu/common/xve/xve_gp106.c b/drivers/gpu/nvgpu/common/xve/xve_gp106.c
index 4f3def11..3ed02f1b 100644
--- a/drivers/gpu/nvgpu/common/xve/xve_gp106.c
+++ b/drivers/gpu/nvgpu/common/xve/xve_gp106.c
@@ -111,7 +111,7 @@ int xve_get_speed_gp106(struct gk20a *g, u32 *xve_link_speed)
 	if (link_speed == xve_link_control_status_link_speed_link_speed_8p0_v())
 		real_link_speed = GPU_XVE_SPEED_8P0;
 
-	if (!real_link_speed)
+	if (real_link_speed == 0U)
 		return -ENODEV;
 
 	*xve_link_speed = real_link_speed;
@@ -147,7 +147,7 @@ static void set_xve_l0s_mask(struct gk20a *g, bool status)
 static void set_xve_l1_mask(struct gk20a *g, int status)
 {
 	u32 xve_priv;
-	u32 status_bit = status ? 1 : 0;
+	u32 status_bit = (status != 0) ? 1 : 0;
 
 	xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
 
@@ -242,7 +242,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 		    (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
 		     xp_pl_link_config_ltssm_directive_normal_operations_v()))
 			break;
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err_status = -ETIMEDOUT;
@@ -313,7 +313,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 		if (pl_link_config ==
 		    gk20a_readl(g, xp_pl_link_config_r(0)))
 			break;
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err_status = -ETIMEDOUT;
@@ -348,7 +348,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 		    (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
 		     xp_pl_link_config_ltssm_directive_normal_operations_v()))
 			break;
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err_status = -ETIMEDOUT;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
index e58f5498..da27e29c 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h
@@ -330,7 +330,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
  */
 #define pte_dbg(g, attrs, fmt, args...) \
 	do { \
-		if (attrs && attrs->debug) \
+		if ((attrs != NULL) && (attrs->debug)) \
 			nvgpu_info(g, fmt, ##args); \
 		else \
 			nvgpu_log(g, gpu_dbg_pte, fmt, ##args); \
diff --git a/drivers/gpu/nvgpu/include/nvgpu/list.h b/drivers/gpu/nvgpu/include/nvgpu/list.h
index a7e13cab..16080353 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/list.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/list.h
@@ -22,6 +22,7 @@
 
 #ifndef NVGPU_LIST_H
 #define NVGPU_LIST_H
+#include <nvgpu/types.h>
 
 struct nvgpu_list_node {
 	struct nvgpu_list_node *prev;
@@ -57,7 +58,7 @@ static inline void nvgpu_list_del(struct nvgpu_list_node *node)
 	nvgpu_init_list_node(node);
 }
 
-static inline int nvgpu_list_empty(struct nvgpu_list_node *head)
+static inline bool nvgpu_list_empty(struct nvgpu_list_node *head)
 {
 	return head->next == head;
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index a7bd79cb..85175069 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -72,7 +72,7 @@ struct nvgpu_semaphore {
 	struct nvgpu_semaphore_loc location;
 
 	nvgpu_atomic_t value;
-	int incremented;
+	bool incremented;
 
 	struct nvgpu_ref ref;
 };
@@ -99,7 +99,7 @@ struct nvgpu_semaphore_pool {
 	 */
 	struct nvgpu_mem rw_mem;
 
-	int mapped;
+	bool mapped;
 
 	/*
 	 * Sometimes a channel can be released before other channels are