-rw-r--r-- drivers/gpu/nvgpu/gk20a/as_gk20a.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 11
-rw-r--r-- drivers/gpu/nvgpu/gk20a/cde_gk20a.h | 10
-rw-r--r-- drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | 6
-rw-r--r-- drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 11
-rw-r--r-- drivers/gpu/nvgpu/gk20a/channel_gk20a.h | 6
-rw-r--r-- drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/debug_gk20a.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/fb_gk20a.c | 4
-rw-r--r-- drivers/gpu/nvgpu/gk20a/fence_gk20a.c | 4
-rw-r--r-- drivers/gpu/nvgpu/gk20a/fence_gk20a.h | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 50
-rw-r--r-- drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | 14
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gk20a.h | 4
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 20
-rw-r--r-- drivers/gpu/nvgpu/gk20a/gr_gk20a.h | 4
-rw-r--r-- drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 14
-rw-r--r-- drivers/gpu/nvgpu/gk20a/platform_gk20a.h | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c | 14
-rw-r--r-- drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 7
-rw-r--r-- drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 8
-rw-r--r-- drivers/gpu/nvgpu/gk20a/sched_gk20a.c | 4
-rw-r--r-- drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h | 6
-rw-r--r-- drivers/gpu/nvgpu/gk20a/sync_gk20a.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 10
-rw-r--r-- drivers/gpu/nvgpu/gk20a/tsg_gk20a.h | 6
-rw-r--r-- drivers/gpu/nvgpu/gm206/acr_gm206.c | 3
-rw-r--r-- drivers/gpu/nvgpu/gm206/bios_gm206.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gm20b/fb_gm20b.c | 4
-rw-r--r-- drivers/gpu/nvgpu/gm20b/fifo_gm20b.c | 2
-rw-r--r-- drivers/gpu/nvgpu/gm20b/gr_gm20b.c | 6
-rw-r--r-- drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 8
-rw-r--r-- drivers/gpu/nvgpu/vgpu/dbg_vgpu.c | 4
-rw-r--r-- drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | 9
-rw-r--r-- include/uapi/linux/nvgpu.h | 2
38 files changed, 143 insertions, 128 deletions
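
One theme runs through all 38 files: loop indices and ID variables that are compared against unsigned quantities (num_engines, num_channels, register field values) move from int to unsigned int or u32, and the bare ~0 sentinel, which is an int with value -1, is replaced with explicitly typed constants such as FIFO_INVAL_ENGINE_ID or an explicit (u32)~0 cast. A minimal standalone sketch of the pitfall being cleaned up; INVAL_ID and everything else in this snippet is illustrative, not code from the driver:

#include <stdio.h>

/* Typed sentinel in the spirit of FIFO_INVAL_ENGINE_ID ((u32)~0). */
#define INVAL_ID ((unsigned int)~0)

int main(void)
{
        unsigned int id = INVAL_ID;

        /* Before: "if (id != ~0)" compares unsigned id with the signed int -1.
         * The usual arithmetic conversions make it behave as hoped, but the
         * compiler flags it under -Wsign-compare and the intended 32-bit
         * width stays implicit.  After: both operands are unsigned. */
        if (id != INVAL_ID)
                printf("engine %u is valid\n", id);
        else
                printf("no engine selected\n");

        return 0;
}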
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index 8144ec6e..6cdbe3e1 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -263,7 +263,7 @@ static int gk20a_as_ioctl_get_va_regions(
         unsigned int write_entries;
         struct nvgpu_as_va_region __user *user_region_ptr;
         struct vm_gk20a *vm = as_share->vm;
-        int page_sizes = gmmu_page_size_kernel;
+        unsigned int page_sizes = gmmu_page_size_kernel;
 
         gk20a_dbg_fn("");
 
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 6b8af929..57b49f2c 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -47,7 +47,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g);
 
 static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 {
-        int i;
+        unsigned int i;
 
         for (i = 0; i < cde_ctx->num_bufs; i++) {
                 struct mem_desc *mem = cde_ctx->mem + i;
@@ -361,7 +361,8 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx)
         struct mem_desc *target_mem;
         u32 *target_mem_ptr;
         u64 new_data;
-        int user_id = 0, i, err;
+        int user_id = 0, err;
+        unsigned int i;
 
         for (i = 0; i < cde_ctx->num_params; i++) {
                 struct gk20a_cde_hdr_param *param = cde_ctx->params + i;
@@ -456,7 +457,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx,
         }
 
         target_mem = cde_ctx->mem + param->target_buf;
-        if (target_mem->size< (param->target_byte_offset + 3)) {
+        if (target_mem->size < (param->target_byte_offset + 3)) {
                 gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. param idx = %d, target_buf_offs=%lld, target_buf_size=%zu",
                            cde_ctx->num_params, param->target_byte_offset,
                            target_mem->size);
@@ -515,7 +516,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx,
 {
         struct nvgpu_gpfifo **gpfifo, *gpfifo_elem;
         u32 *num_entries;
-        int i;
+        unsigned int i;
 
         /* check command type */
         if (op == TYPE_BUF_COMMAND_INIT) {
@@ -615,7 +616,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx,
         struct gk20a_cde_hdr_elem *elem;
         u32 min_size = 0;
         int err = 0;
-        int i;
+        unsigned int i;
 
         min_size += 2 * sizeof(u32);
         if (img->size < min_size) {
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
index 16d6b4ef..8cdba938 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h
@@ -67,8 +67,8 @@ struct gk20a_cde_hdr_replace {
         u32 source_buf;
         s32 shift;
         u32 type;
-        s64 target_byte_offset;
-        s64 source_byte_offset;
+        u64 target_byte_offset;
+        u64 source_byte_offset;
         u64 mask;
 };
 
@@ -113,7 +113,7 @@ struct gk20a_cde_hdr_param {
         s32 shift;
         u32 type;
         s64 data_offset;
-        s64 target_byte_offset;
+        u64 target_byte_offset;
         u64 mask;
 };
 
@@ -223,11 +223,11 @@ struct gk20a_cde_ctx {
 
         /* buf converter configuration */
         struct mem_desc mem[MAX_CDE_BUFS];
-        int num_bufs;
+        unsigned int num_bufs;
 
         /* buffer patching params (where should patching be done) */
         struct gk20a_cde_hdr_param params[MAX_CDE_PARAMS];
-        int num_params;
+        unsigned int num_params;
 
         /* storage for user space parameter values */
         u32 user_param_values[MAX_CDE_USER_PARAMS];
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index bfd183fb..235bc027 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -211,10 +211,10 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
         kfree(ce_ctx);
 }
 
-static inline int gk20a_ce_get_method_size(int request_operation)
+static inline unsigned int gk20a_ce_get_method_size(int request_operation)
 {
         /* failure size */
-        int methodsize = ~0;
+        unsigned int methodsize = UINT_MAX;
 
         if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER)
                 methodsize = 10 * 2 * sizeof(u32);
@@ -518,7 +518,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
         ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED;
 
 end:
-        if (ctx_id == ~0) {
+        if (ctx_id == (u32)~0) {
                 mutex_lock(&ce_app->app_mutex);
                 gk20a_ce_delete_gpu_context(ce_ctx);
                 mutex_unlock(&ce_app->app_mutex);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 43a6df0e..8105de11 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -223,7 +223,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c)
 {
         u32 val, exp, man;
         u64 timeout;
-        int val_len;
+        unsigned int val_len;
 
         val = pbdma_acquire_retry_man_2_f() |
                 pbdma_acquire_retry_exp_2_f();
@@ -238,7 +238,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c)
         val_len = fls(timeout >> 32) + 32;
         if (val_len == 32)
                 val_len = fls(timeout);
-        if (val_len > 16 + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
+        if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */
                 exp = pbdma_acquire_timeout_exp_max_v();
                 man = pbdma_acquire_timeout_man_max_v();
         } else if (val_len > 16) {
@@ -1618,7 +1618,8 @@ bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c)
 static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
                                 unsigned int num_jobs)
 {
-        int i, err;
+        unsigned int i;
+        int err;
         size_t size;
         struct priv_cmd_entry *entries = NULL;
 
@@ -3044,7 +3045,7 @@ const struct file_operations gk20a_event_id_ops = {
 };
 
 static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
-                                int event_id,
+                                u32 event_id,
                                 struct gk20a_event_id_data **event_id_data)
 {
         struct gk20a_event_id_data *local_event_id_data;
@@ -3069,7 +3070,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch,
 }
 
 void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-                                int event_id)
+                                u32 event_id)
 {
         struct gk20a_event_id_data *event_id_data;
         int err = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 832e03e9..66052950 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -188,7 +188,7 @@ struct channel_gk20a {
         bool has_timedout;
         u32 timeout_ms_max;
         bool timeout_debug_dump;
-        u32 timeslice_us;
+        unsigned int timeslice_us;
 
         struct dma_buf *error_notifier_ref;
         struct nvgpu_notification *error_notifier;
@@ -309,11 +309,11 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g,
                                 int timeslice_period,
                                 int *__timeslice_timeout, int *__timeslice_scale);
 int gk20a_channel_set_priority(struct channel_gk20a *ch, u32 priority);
-int gk20a_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);
+int gk20a_channel_set_timeslice(struct channel_gk20a *ch, unsigned int timeslice);
 int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
                                 u32 level);
 void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
-                                int event_id);
+                                u32 event_id);
 
 void gk20a_channel_setup_ramfc_for_privileged_channel(struct channel_gk20a *c);
 
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index febea719..ba8fbc98 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -192,7 +192,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
                                bool need_sync_fence)
 {
         u32 thresh;
-        int incr_cmd_size;
+        size_t incr_cmd_size;
         int off;
         int err;
         struct gk20a_channel_syncpt *sp =
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index 493cbe80..56bc2c7a 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -581,7 +581,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
                 struct nvgpu_gpu_get_cpu_time_correlation_info_args *args)
 {
         int err = 0;
-        int i = 0;
+        unsigned int i = 0;
         u32 gpu_timestamp_hi_new = 0;
         u32 gpu_timestamp_hi_old = 0;
 
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index 9a84e2e3..b84db933 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -177,7 +177,7 @@ void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
 {
         struct fifo_gk20a *f = &g->fifo;
         u32 chid;
-        int i;
+        unsigned int i;
 
         struct ch_state **ch_state;
 
diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
index db02ec7b..2fb7f64b 100644
--- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c
@@ -51,12 +51,12 @@ static void gk20a_fb_set_mmu_page_size(struct gk20a *g)
         gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl);
 }
 
-static int gk20a_fb_compression_page_size(struct gk20a *g)
+static unsigned int gk20a_fb_compression_page_size(struct gk20a *g)
 {
         return SZ_128K;
 }
 
-static int gk20a_fb_compressible_page_size(struct gk20a *g)
+static unsigned int gk20a_fb_compressible_page_size(struct gk20a *g)
 {
         return SZ_64K;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index e046152d..323caa8f 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -114,14 +114,14 @@ int gk20a_fence_install_fd(struct gk20a_fence *f)
 #endif
 }
 
-int gk20a_alloc_fence_pool(struct channel_gk20a *c, int count)
+int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
 {
         int err;
         size_t size;
         struct gk20a_fence *fence_pool = NULL;
 
         size = sizeof(struct gk20a_fence);
-        if (count <= ULONG_MAX / size) {
+        if (count <= UINT_MAX / size) {
                 size = count * size;
                 fence_pool = vzalloc(size);
         }
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
index 97a7d957..beba761a 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
@@ -67,7 +67,7 @@ int gk20a_fence_from_syncpt(
 
 int gk20a_alloc_fence_pool(
                 struct channel_gk20a *c,
-                int size);
+                unsigned int count);
 
 void gk20a_free_fence_pool(
                 struct channel_gk20a *c);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 9887b68f..68394da5 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -252,7 +252,7 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
 
 static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
 {
-        u32 fault_id = ~0;
+        u32 fault_id = FIFO_INVAL_ENGINE_ID;
         struct fifo_engine_info_gk20a *engine_info;
 
         engine_info = gk20a_fifo_get_engine_info(g, engine_id);
@@ -312,7 +312,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
         u32 i;
         u32 max_info_entries = top_device_info__size_1_v();
         u32 engine_enum = ENGINE_INVAL_GK20A;
-        u32 engine_id = ~0;
+        u32 engine_id = FIFO_INVAL_ENGINE_ID;
         u32 runlist_id = ~0;
         u32 pbdma_id = ~0;
         u32 intr_id = ~0;
@@ -428,7 +428,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
 {
         u32 eng_intr_mask = 0;
-        int i;
+        unsigned int i;
         u32 active_engine_id = 0;
         u32 engine_enum = ENGINE_INVAL_GK20A;
 
@@ -588,7 +588,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
         struct fifo_runlist_info_gk20a *runlist;
         struct device *d = dev_from_gk20a(g);
-        s32 runlist_id = -1;
+        unsigned int runlist_id;
         u32 i;
         size_t runlist_size;
 
@@ -653,7 +653,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
         u32 intr_stall;
         u32 mask;
         u32 timeout;
-        int i;
+        unsigned int i;
         struct gk20a_platform *platform = dev_get_drvdata(g->dev);
         u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 
@@ -777,7 +777,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 {
         struct fifo_gk20a *f = &g->fifo;
         struct device *d = dev_from_gk20a(g);
-        int chid, i, err = 0;
+        unsigned int chid, i;
+        int err = 0;
 
         gk20a_dbg_fn("");
 
@@ -974,7 +975,7 @@ static struct channel_gk20a *
 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
 {
         struct gk20a *g = f->g;
-        int ci;
+        unsigned int ci;
         if (unlikely(!f->channel))
                 return NULL;
         for (ci = 0; ci < f->num_channels; ci++) {
@@ -1461,7 +1462,7 @@ static bool gk20a_fifo_handle_mmu_fault(
                 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
                           "sm debugger attached,"
                           " deferring channel recovery to channel free");
-        } else if (engine_id != ~0) {
+        } else if (engine_id != FIFO_INVAL_ENGINE_ID) {
                 was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex);
                 mutex_lock(&g->fifo.gr_reset_mutex);
                 /* if lock is already taken, a reset is taking place
@@ -1565,7 +1566,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
         }
 
         mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id);
-        if (mmu_id != ~0)
+        if (mmu_id != FIFO_INVAL_ENGINE_ID)
                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id),
                              fifo_trigger_mmu_fault_id_f(mmu_id) |
                              fifo_trigger_mmu_fault_enable_f(1));
@@ -1595,7 +1596,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
 
 static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
 {
-        int i;
+        unsigned int i;
         u32 engines = 0;
 
         for (i = 0; i < g->fifo.num_engines; i++) {
@@ -1712,7 +1713,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
                 for_each_set_bit(engine_id, &engine_ids, 32) {
                         u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id);
 
-                        if (mmu_id != ~0)
+                        if (mmu_id != FIFO_INVAL_ENGINE_ID)
                                 mmu_fault_engines |= BIT(mmu_id);
                 }
         } else {
@@ -1736,7 +1737,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
                         u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id);
 
                         engine_ids |= BIT(active_engine_id);
-                        if (mmu_id != ~0)
+                        if (mmu_id != FIFO_INVAL_ENGINE_ID)
                                 mmu_fault_engines |= BIT(mmu_id);
                 }
         }
@@ -2063,7 +2064,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
                 && print_channel_reset_log;
 
         if (print_channel_reset_log) {
-                int engine_id;
+                unsigned int engine_id;
                 gk20a_err(dev_from_gk20a(g),
                           "channel reset initiated from %s; intr=0x%08x",
                           __func__, fifo_intr);
@@ -2497,7 +2498,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g,
 
 int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
 {
-        int i;
+        unsigned int i;
         int err = 0, ret = 0;
 
         for (i = 0; i < g->fifo.num_engines; i++) {
@@ -2519,7 +2520,8 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
                         bool wait_for_idle)
 {
         u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
-        u32 pbdma_chid = ~0, engine_chid = ~0, disable;
+        u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID;
+        u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable;
         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
         u32 mutex_ret;
         u32 err = 0;
@@ -2551,7 +2553,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
             chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
                 pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);
 
-        if (pbdma_chid != ~0) {
+        if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) {
                 err = g->ops.fifo.preempt_channel(g, pbdma_chid);
                 if (err)
                         goto clean_up;
@@ -2567,7 +2569,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
             ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
                 engine_chid = fifo_engine_status_next_id_v(eng_stat);
 
-        if (engine_chid != ~0 && engine_chid != pbdma_chid) {
+        if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) {
                 err = g->ops.fifo.preempt_channel(g, engine_chid);
                 if (err)
                         goto clean_up;
@@ -2591,7 +2593,7 @@ clean_up:
 int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
                         bool wait_for_idle)
 {
-        int i;
+        unsigned int i;
         int err = 0, ret = 0;
         u32 active_engine_id;
 
@@ -2609,7 +2611,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
         }
 
         if (err) {
-                while (--i >= 0) {
+                while (i-- != 0) {
                         active_engine_id = g->fifo.active_engines_list[i];
                         err = gk20a_fifo_enable_engine_activity(g,
                                         &g->fifo.engine_info[active_engine_id]);
@@ -2626,7 +2628,7 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
 {
         struct fifo_gk20a *f = &g->fifo;
         u32 engines = 0;
-        int i;
+        unsigned int i;
 
         for (i = 0; i < f->num_engines; i++) {
                 u32 active_engine_id = g->fifo.active_engines_list[i];
@@ -2852,7 +2854,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                                             u32 hw_chid, bool add,
                                             bool wait_for_finish)
 {
-        u32 ret = 0;
+        int ret = 0;
         struct fifo_gk20a *f = &g->fifo;
         struct fifo_runlist_info_gk20a *runlist = NULL;
         u32 *runlist_entry_base = NULL;
@@ -2867,7 +2869,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
         /* valid channel, add/remove it from active list.
            Otherwise, keep active list untouched for suspend/resume. */
-        if (hw_chid != ~0) {
+        if (hw_chid != FIFO_INVAL_CHANNEL_ID) {
                 ch = &f->channel[hw_chid];
                 if (gk20a_is_channel_marked_as_tsg(ch))
                         tsg = &f->tsg[ch->tsgid];
@@ -2909,7 +2911,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                 goto clean_up;
         }
 
-        if (hw_chid != ~0 || /* add/remove a valid channel */
+        if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */
             add /* resume to add all channels back */) {
                 u32 max_entries = f->num_runlist_entries;
                 u32 *runlist_end;
@@ -3055,7 +3057,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
 
 bool gk20a_fifo_is_engine_busy(struct gk20a *g)
 {
-        int i;
+        unsigned int i;
 
         for (i = 0; i < fifo_engine_status__size_1_v(); i++) {
                 u32 status = gk20a_readl(g, fifo_engine_status_r(i));
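
One hunk above is a behavior fix rather than a pure type cleanup: with i now unsigned in gk20a_fifo_disable_all_engine_activity(), the old unwind loop "while (--i >= 0)" could never terminate, because an unsigned value is always >= 0, whereas "while (i-- != 0)" tests before decrementing, visits i-1 down to 0, and then stops. A self-contained sketch of the idiom, with a made-up three-entry engine list:

#include <stdio.h>

int main(void)
{
        const char *engines[] = { "gr", "ce0", "ce1" };        /* hypothetical */
        unsigned int i = 3;        /* say enabling failed after 3 engines */

        /* "--i >= 0" is always true for unsigned i; "i-- != 0" tests the
         * old value first, so the body sees i == 2, 1, 0 and the loop
         * exits once i was 0 before the decrement. */
        while (i-- != 0)
                printf("re-enable %s\n", engines[i]);

        return 0;
}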
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 64bdeabb..c32142e3 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -26,7 +26,9 @@
 
 #define MAX_RUNLIST_BUFFERS 2
 
-#define FIFO_INVAL_ENGINE_ID ~0
+#define FIFO_INVAL_ENGINE_ID ((u32)~0)
+#define FIFO_INVAL_CHANNEL_ID ((u32)~0)
+#define FIFO_INVAL_TSG_ID ((u32)~0)
 
 /* generally corresponds to the "pbdma" engine */
 
@@ -96,11 +98,11 @@ struct fifo_engine_info_gk20a {
 
 struct fifo_gk20a {
         struct gk20a *g;
-        int num_channels;
-        int runlist_entry_size;
-        int num_runlist_entries;
+        unsigned int num_channels;
+        unsigned int runlist_entry_size;
+        unsigned int num_runlist_entries;
 
-        int num_pbdma;
+        unsigned int num_pbdma;
         u32 *pbdma_map;
 
         struct fifo_engine_info_gk20a *engine_info;
@@ -114,7 +116,7 @@ struct fifo_gk20a {
         struct mem_desc userd;
         u32 userd_entry_size;
 
-        int used_channels;
+        unsigned int used_channels;
         struct channel_gk20a *channel;
         /* zero-kref'd channels here */
         struct list_head free_chs;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index ffddebe7..56b05e94 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -322,8 +322,8 @@ struct gpu_ops {
                 void (*init_kind_attr)(struct gk20a *g);
                 void (*set_mmu_page_size)(struct gk20a *g);
                 bool (*set_use_full_comp_tag_line)(struct gk20a *g);
-                int (*compression_page_size)(struct gk20a *g);
-                int (*compressible_page_size)(struct gk20a *g);
+                unsigned int (*compression_page_size)(struct gk20a *g);
+                unsigned int (*compressible_page_size)(struct gk20a *g);
                 void (*dump_vpr_wpr_info)(struct gk20a *g);
         } fb;
         struct {
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c
index ab0fbc64..06c33a8c 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c
@@ -842,7 +842,7 @@ static const struct gk20a_allocator_ops page_ops = {
 static int gk20a_page_alloc_init_slabs(struct gk20a_page_allocator *a)
 {
         size_t nr_slabs = ilog2(a->page_size >> 12);
-        int i;
+        unsigned int i;
 
         a->slabs = kcalloc(nr_slabs,
                            sizeof(struct page_alloc_slab),
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index e6103479..ee8b3b63 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -124,7 +124,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
-        int i;
+        unsigned int i;
 
         gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
                 gk20a_readl(g, gr_fecs_os_r()));
@@ -1395,9 +1395,9 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
 
         fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
         if (g->tpc_fs_mask_user &&
-            fuse_tpc_mask == (0x1 << gr->max_tpc_count) - 1) {
+            fuse_tpc_mask == (0x1U << gr->max_tpc_count) - 1U) {
                 u32 val = g->tpc_fs_mask_user;
-                val &= (0x1 << gr->max_tpc_count) - 1;
+                val &= (0x1U << gr->max_tpc_count) - 1U;
                 gk20a_writel(g, gr_cwd_fs_r(),
                         gr_cwd_fs_num_gpcs_f(gr->gpc_count) |
                         gr_cwd_fs_num_tpcs_f(hweight32(val)));
@@ -1444,7 +1444,7 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g)
         struct av_list_gk20a *sw_bundle_init = &g->gr.ctx_vars.sw_bundle_init;
         u32 last_bundle_data = 0;
         u32 err = 0;
-        int i;
+        unsigned int i;
         unsigned long end_jiffies = jiffies +
                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
 
@@ -2110,7 +2110,7 @@ static int gr_gk20a_copy_ctxsw_ucode_segments(
         u32 *bootimage,
         u32 *code, u32 *data)
 {
-        int i;
+        unsigned int i;
 
         gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage,
                         segments->boot.size);
@@ -4048,7 +4048,8 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
 {
-        int i, ret;
+        unsigned int i;
+        int ret;
 
         for (i = 0; i < gr->max_used_color_index; i++) {
                 struct zbc_color_table *c_tbl = &gr->zbc_col_tbl[i];
@@ -4898,7 +4899,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
                 DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
                              PAGE_SIZE);
         u32 *whitelist = NULL;
-        int num_entries = 0;
+        unsigned int num_entries = 0;
 
         if (gk20a_mem_begin(g, mem)) {
                 gk20a_err(dev_from_gk20a(g),
@@ -6996,7 +6997,7 @@ static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
         gk20a_writel(g, gpc_tpc_addr, reg);
 }
 
-#define ILLEGAL_ID (~0)
+#define ILLEGAL_ID ((u32)~0)
 
 static inline bool check_main_image_header_magic(u8 *context)
 {
@@ -8762,7 +8763,8 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
                 struct channel_gk20a *ch, u64 sms, bool enable)
 {
         struct nvgpu_dbg_gpu_reg_op *ops;
-        int i = 0, sm_id, err;
+        unsigned int i = 0, sm_id;
+        int err;
         u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
         u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
 
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index d03f945c..662d9a87 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -334,8 +334,8 @@ struct gr_gk20a {
         s32 max_default_color_index;
         s32 max_default_depth_index;
 
-        s32 max_used_color_index;
-        s32 max_used_depth_index;
+        u32 max_used_color_index;
+        u32 max_used_depth_index;
 
 #define GR_CHANNEL_MAP_TLB_SIZE 2 /* must of power of 2 */
         struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE];
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 9906b77b..fcd5d664 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -789,10 +789,10 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm)
         struct gk20a *g = gk20a_from_mm(mm);
         struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
-        if (mm->vidmem.ce_ctx_id != ~0)
+        if (mm->vidmem.ce_ctx_id != (u32)~0)
                 gk20a_ce_delete_context(g->dev, mm->vidmem.ce_ctx_id);
 
-        mm->vidmem.ce_ctx_id = ~0;
+        mm->vidmem.ce_ctx_id = (u32)~0;
 
         if (platform->has_ce)
                 gk20a_vm_remove_support_nofree(&mm->ce.vm);
@@ -836,7 +836,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g)
         u64 region2_base = 0;
         int err = 0;
 
-        if (mm->vidmem.ce_ctx_id == ~0)
+        if (mm->vidmem.ce_ctx_id == (u32)~0)
                 return -EINVAL;
 
         err = gk20a_ce_execute_ops(g->dev,
@@ -989,7 +989,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 
         gk20a_init_pramin(mm);
 
-        mm->vidmem.ce_ctx_id = ~0;
+        mm->vidmem.ce_ctx_id = (u32)~0;
 
         err = gk20a_init_vidmem(mm);
         if (err)
@@ -1119,7 +1119,7 @@ int gk20a_init_mm_support(struct gk20a *g)
 void gk20a_init_mm_ce_context(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
-        if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == ~0)) {
+        if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == (u32)~0)) {
                 g->mm.vidmem.ce_ctx_id =
                         gk20a_ce_create_context_with_cb(g->dev,
                                         gk20a_fifo_get_fast_ce_runlist_id(g),
@@ -1128,7 +1128,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g)
                                         -1,
                                         NULL);
 
-                if (g->mm.vidmem.ce_ctx_id == ~0)
+                if (g->mm.vidmem.ce_ctx_id == (u32)~0)
                         gk20a_err(g->dev,
                                 "Failed to allocate CE context for vidmem page clearing support");
         }
@@ -3021,7 +3021,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
         struct page_alloc_chunk *chunk = NULL;
         int err = 0;
 
-        if (g->mm.vidmem.ce_ctx_id == ~0)
+        if (g->mm.vidmem.ce_ctx_id == (u32)~0)
                 return -EINVAL;
 
         alloc = get_vidmem_page_alloc(mem->sgt->sgl);
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index f13a11ea..3d5cd1b2 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -56,7 +56,7 @@ struct gk20a_platform {
         bool has_syncpoints;
 
         /* channel limit after which to start aggressive sync destroy */
-        int aggressive_sync_destroy_thresh;
+        unsigned int aggressive_sync_destroy_thresh;
 
         /* flag to set sync destroy aggressiveness */
         bool aggressive_sync_destroy;
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index 35d524f1..01ff5f96 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -58,8 +58,8 @@
 extern struct device tegra_vpr_dev;
 
 struct gk20a_emc_params {
-        long bw_ratio;
-        long freq_last_set;
+        unsigned long bw_ratio;
+        unsigned long freq_last_set;
 };
 
 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
@@ -217,7 +217,7 @@ static void gk20a_tegra_postscale(struct device *dev,
         struct clk *emc_clk = platform->clk[2];
         enum tegra_chipid chip_id = tegra_get_chip_id();
         unsigned long emc_target;
-        long emc_freq_lower, emc_freq_upper, emc_freq_rounded;
+        unsigned long emc_freq_lower, emc_freq_upper, emc_freq_rounded;
 
         emc_target = gk20a_tegra_get_emc_rate(g, emc_params);
 
@@ -234,8 +234,10 @@ static void gk20a_tegra_postscale(struct device *dev,
                 break;
 
         case TEGRA_CHIPID_TEGRA21:
-                emc_freq_lower = tegra_emc_round_rate_updown(emc_target, false);
-                emc_freq_upper = tegra_emc_round_rate_updown(emc_target, true);
+                emc_freq_lower = (unsigned long)
+                        tegra_emc_round_rate_updown(emc_target, false);
+                emc_freq_upper = (unsigned long)
+                        tegra_emc_round_rate_updown(emc_target, true);
 
                 /* round to the nearest frequency step */
                 if (emc_target < (emc_freq_lower + emc_freq_upper) / 2)
@@ -645,7 +647,7 @@ static int gk20a_tegra_get_clocks(struct device *dev)
 {
         struct gk20a_platform *platform = dev_get_drvdata(dev);
         char devname[16];
-        int i;
+        unsigned int i;
         int ret = 0;
 
         BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks));
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index bfa3902e..193938ba 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2709,7 +2709,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu,
 {
         u32 head, tail;
         bool rewind = false;
-        int free;
+        unsigned int free;
 
         size = ALIGN(size, QUEUE_ALIGNMENT);
 
@@ -2955,7 +2955,8 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm = &mm->pmu.vm;
         struct device *d = dev_from_gk20a(g);
-        int i, err = 0;
+        unsigned int i;
+        int err = 0;
         u8 *ptr;
 
         gk20a_dbg_fn("");
@@ -4128,7 +4129,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu)
 void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
 {
         struct gk20a *g = gk20a_from_pmu(pmu);
-        int i;
+        unsigned int i;
 
         gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
                 gk20a_readl(g, pwr_falcon_os_r()));
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 8b87b523..b0754070 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -35,7 +35,7 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
         if (key < prange->base)
                 return -1;
         else if (prange->base <= key && key < (prange->base +
-                                               (prange->count * 4)))
+                                               (prange->count * 4U)))
                 return 0;
         return 1;
 }
@@ -379,7 +379,8 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
                       struct nvgpu_dbg_gpu_reg_op *ops,
                       u64 num_ops)
 {
-        int err = 0, i;
+        int err = 0;
+        unsigned int i;
         struct channel_gk20a *ch = NULL;
         struct gk20a *g = dbg_s->g;
         /*struct gr_gk20a *gr = &g->gr;*/
@@ -799,7 +800,8 @@ static int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
          * it was already swapped out in/out once or not, etc.
          */
         struct nvgpu_dbg_gpu_reg_op ops[4];
-        int i;
+        unsigned int i;
+
         for (i = 0; i < ARRAY_SIZE(ops); i++) {
                 ops[i].op = REGOP(WRITE_32);
                 ops[i].type = REGOP(TYPE_GR_CTX);
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
index 3d7e8bd7..a60be7ef 100644
--- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c
@@ -140,7 +140,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched,
         struct fifo_gk20a *f = &sched->g->fifo;
         struct tsg_gk20a *tsg;
         u64 *bitmap;
-        int tsgid;
+        unsigned int tsgid;
         /* pid at user level corresponds to kernel tgid */
         pid_t tgid = (pid_t)arg->pid;
         int err = 0;
@@ -492,7 +492,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp)
         struct gk20a *g = sched->g;
         struct fifo_gk20a *f = &g->fifo;
         struct tsg_gk20a *tsg;
-        int tsgid;
+        unsigned int tsgid;
 
         gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched);
 
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
index d96037ce..c73d3c05 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
@@ -222,7 +222,7 @@ static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
          * the value of the semaphore then the semaphore has been signaled
          * (a.k.a. released).
          */
-        return sema_val >= atomic_read(&s->value);
+        return (int)sema_val >= atomic_read(&s->value);
 }
 
 static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s)
@@ -240,12 +240,12 @@ static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s)
 
 static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s)
 {
-        return atomic_read(&s->value);
+        return (u32)atomic_read(&s->value);
 }
 
 static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
 {
-        return atomic_read(&s->hw_sema->next_value);
+        return (u32)atomic_read(&s->hw_sema->next_value);
 }
 
 /*
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
index af6af70e..b642981c 100644
--- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c
@@ -445,7 +445,7 @@ static int gk20a_sync_fill_driver_data(struct sync_pt *sync_pt,
 {
         struct gk20a_sync_pt_info info;
 
-        if (size < sizeof(info))
+        if (size < (int)sizeof(info))
                 return -ENOMEM;
 
         info.hw_op_ns = ktime_to_ns(gk20a_sync_pt_duration(sync_pt));
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 133b737e..43ee79cd 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -65,7 +65,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 {
         struct fifo_gk20a *f = &g->fifo;
         struct fifo_runlist_info_gk20a *runlist;
-        int i;
+        unsigned int i;
 
         for (i = 0; i < f->max_runlists; ++i) {
                 runlist = &f->runlist_info[i];
@@ -112,7 +112,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
         ch->tsgid = tsg->tsgid;
 
         /* all the channel part of TSG should need to be same runlist_id */
-        if (tsg->runlist_id == ~0)
+        if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
                 tsg->runlist_id = ch->runlist_id;
         else if (tsg->runlist_id != ch->runlist_id) {
                 gk20a_err(dev_from_gk20a(tsg->g),
@@ -154,7 +154,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 {
         struct tsg_gk20a *tsg = NULL;
 
-        if (tsgid < 0 || tsgid >= g->fifo.num_channels)
+        if (tsgid >= g->fifo.num_channels)
                 return -EINVAL;
 
         tsg = &g->fifo.tsg[tsgid];
@@ -198,7 +198,7 @@ static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
 }
 
 static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
-                                int event_id,
+                                unsigned int event_id,
                                 struct gk20a_event_id_data **event_id_data)
 {
         struct gk20a_event_id_data *local_event_id_data;
@@ -383,7 +383,7 @@ static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
 static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
 {
         struct tsg_gk20a *tsg = NULL;
-        int tsgid;
+        unsigned int tsgid;
 
         mutex_lock(&f->tsg_inuse_mutex);
         for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index e1960102..dbfb068b 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -43,9 +43,9 @@ struct tsg_gk20a {
         int num_active_channels;
         struct mutex ch_list_lock;
 
-        int timeslice_us;
-        int timeslice_timeout;
-        int timeslice_scale;
+        unsigned int timeslice_us;
+        unsigned int timeslice_timeout;
+        unsigned int timeslice_scale;
 
         struct gr_ctx_desc *tsg_gr_ctx;
 
diff --git a/drivers/gpu/nvgpu/gm206/acr_gm206.c b/drivers/gpu/nvgpu/gm206/acr_gm206.c
index 872ff601..a0e60833 100644
--- a/drivers/gpu/nvgpu/gm206/acr_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/acr_gm206.c
@@ -212,7 +212,8 @@ static int gm206_bootstrap_hs_flcn(struct gk20a *g)
 {
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm = &mm->pmu.vm;
-        int i, err = 0;
+        unsigned int i;
+        int err = 0;
         u64 *acr_dmem;
         u32 img_size_in_bytes = 0;
         u32 status;
diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.c b/drivers/gpu/nvgpu/gm206/bios_gm206.c
index 033c84d6..1f3de0b7 100644
--- a/drivers/gpu/nvgpu/gm206/bios_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/bios_gm206.c
@@ -830,7 +830,7 @@ static int gm206_bios_preos(struct gk20a *g)
 
 static int gm206_bios_init(struct gk20a *g)
 {
-        int i;
+        unsigned int i;
         struct gk20a_platform *platform = dev_get_drvdata(g->dev);
         struct dentry *d;
         const struct firmware *bios_fw;
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index f9e2d477..1d0379c5 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -1185,7 +1185,7 @@ int acr_ucode_patch_sig(struct gk20a *g,
                 unsigned int *p_patch_loc,
                 unsigned int *p_patch_ind)
 {
-        int i, *p_sig;
+        unsigned int i, *p_sig;
         gm20b_dbg_pmu("");
 
         if (!pmu_is_debug_mode_en(g)) {
diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
index c65cd450..ecc1d0d5 100644
--- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c
@@ -101,12 +101,12 @@ static bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g)
         return true;
 }
 
-static int gm20b_fb_compression_page_size(struct gk20a *g)
+static unsigned int gm20b_fb_compression_page_size(struct gk20a *g)
 {
         return SZ_128K;
 }
 
-static int gm20b_fb_compressible_page_size(struct gk20a *g)
+static unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
 {
         return SZ_64K;
 }
diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
index 3b877db1..bb38a6f1 100644
--- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c
@@ -80,7 +80,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
         } else {
                 u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
                                                 engine_id);
-                if (mmu_id != ~0)
+                if (mmu_id != (u32)~0)
                         gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id),
                                 fifo_trigger_mmu_fault_enable_f(1));
         }
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 56812fa6..cff0774d 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -548,11 +548,11 @@ static void gr_gm20b_load_tpc_mask(struct gk20a *g)
 
         fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
         if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask &&
-                fuse_tpc_mask == (0x1 << g->gr.max_tpc_count) - 1) {
+                fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) {
                 u32 val = g->tpc_fs_mask_user;
-                val &= (0x1 << g->gr.max_tpc_count) - 1;
+                val &= (0x1U << g->gr.max_tpc_count) - 1U;
                 /* skip tpc to disable the other tpc cause channel timeout */
-                val = (0x1 << hweight32(val)) - 1;
+                val = (0x1U << hweight32(val)) - 1U;
                 gk20a_writel(g, gr_fe_tpc_fs_r(), val);
         } else {
                 gk20a_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask);
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index fee9a807..6cb238b7 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -198,7 +198,7 @@ void gm20b_ltc_init_fs_state(struct gk20a *g)
 void gm20b_ltc_isr(struct gk20a *g)
 {
         u32 mc_intr, ltc_intr;
-        int ltc, slice;
+        unsigned int ltc, slice;
         u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
         u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
 
@@ -227,8 +227,8 @@ void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
         u32 data;
         bool done[g->ltc_count];
         s32 retry = 100;
-        int i;
-        int num_done = 0;
+        unsigned int i;
+        unsigned int num_done = 0;
         u32 ltc_d = ltc_ltc1_ltss_g_elpg_r() - ltc_ltc0_ltss_g_elpg_r();
 
         gk20a_dbg_fn("");
@@ -289,7 +289,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
 void gm20b_flush_ltc(struct gk20a *g)
 {
         unsigned long timeout;
-        int ltc;
+        unsigned int ltc;
         u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 
 #define __timeout_init() \
diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
index c312c419..609b497a 100644
--- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c
@@ -30,9 +30,9 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s,
         struct tegra_vgpu_cmd_msg msg;
         struct tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops;
         void *oob;
-        size_t oob_size;
+        size_t oob_size, ops_size;
         void *handle = NULL;
-        int ops_size, err = 0;
+        int err = 0;
 
         gk20a_dbg_fn("");
         BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op));
diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
index efeeb8ee..c3669990 100644
--- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c
@@ -184,7 +184,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 {
         struct fifo_runlist_info_gk20a *runlist;
         struct device *d = dev_from_gk20a(g);
-        s32 runlist_id = -1;
+        unsigned int runlist_id = -1;
         u32 i;
         u64 runlist_size;
 
@@ -238,7 +238,8 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
         struct fifo_gk20a *f = &g->fifo;
         struct device *d = dev_from_gk20a(g);
         struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-        int chid, err = 0;
+        unsigned int chid;
+        int err = 0;
 
         gk20a_dbg_fn("");
 
@@ -486,7 +487,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
         /* valid channel, add/remove it from active list.
            Otherwise, keep active list untouched for suspend/resume. */
-        if (hw_chid != ~0) {
+        if (hw_chid != (u32)~0) {
                 if (add) {
                         if (test_and_set_bit(hw_chid,
                                 runlist->active_channels) == 1)
@@ -498,7 +499,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                 }
         }
 
-        if (hw_chid != ~0 || /* add/remove a valid channel */
+        if (hw_chid != (u32)~0 || /* add/remove a valid channel */
                 add /* resume to add all channels back */) {
                 u32 chid;
 
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h
index e4f94c16..b1c602db 100644
--- a/include/uapi/linux/nvgpu.h
+++ b/include/uapi/linux/nvgpu.h
@@ -928,7 +928,7 @@ struct nvgpu_dbg_gpu_suspend_resume_contexts_args {
  */
 
 #define NVGPU_IOCTL_MAGIC 'H'
-#define NVGPU_NO_TIMEOUT (-1)
+#define NVGPU_NO_TIMEOUT ((u32)~0)
 #define NVGPU_PRIORITY_LOW 50
 #define NVGPU_PRIORITY_MEDIUM 100
 #define NVGPU_PRIORITY_HIGH 150