From d29afd2c9e990799b470bb95a97935cf5b5020db Mon Sep 17 00:00:00 2001 From: Terje Bergstrom Date: Wed, 9 Nov 2016 15:53:16 -0800 Subject: gpu: nvgpu: Fix signed comparison bugs Fix small problems related to signed versus unsigned comparisons throughout the driver. Bump up the warning level to prevent such problems from occurring in future. Change-Id: I8ff5efb419f664e8a2aedadd6515ae4d18502ae0 Signed-off-by: Terje Bergstrom Reviewed-on: http://git-master/r/1252068 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/gk20a/as_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/cde_gk20a.c | 11 +++--- drivers/gpu/nvgpu/gk20a/cde_gk20a.h | 10 +++--- drivers/gpu/nvgpu/gk20a/ce2_gk20a.c | 6 ++-- drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 11 +++--- drivers/gpu/nvgpu/gk20a/channel_gk20a.h | 6 ++-- drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/debug_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/fb_gk20a.c | 4 +-- drivers/gpu/nvgpu/gk20a/fence_gk20a.c | 4 +-- drivers/gpu/nvgpu/gk20a/fence_gk20a.h | 2 +- drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 50 +++++++++++++------------- drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | 14 ++++---- drivers/gpu/nvgpu/gk20a/gk20a.h | 4 +-- drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c | 2 +- drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 20 ++++++----- drivers/gpu/nvgpu/gk20a/gr_gk20a.h | 4 +-- drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 14 ++++---- drivers/gpu/nvgpu/gk20a/platform_gk20a.h | 2 +- drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c | 14 ++++---- drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 7 ++-- drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 8 +++-- drivers/gpu/nvgpu/gk20a/sched_gk20a.c | 4 +-- drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h | 6 ++-- drivers/gpu/nvgpu/gk20a/sync_gk20a.c | 2 +- drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 10 +++--- drivers/gpu/nvgpu/gk20a/tsg_gk20a.h | 6 ++-- drivers/gpu/nvgpu/gm206/acr_gm206.c | 3 +- drivers/gpu/nvgpu/gm206/bios_gm206.c | 2 +- 
drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 2 +- drivers/gpu/nvgpu/gm20b/fb_gm20b.c | 4 +-- drivers/gpu/nvgpu/gm20b/fifo_gm20b.c | 2 +- drivers/gpu/nvgpu/gm20b/gr_gm20b.c | 6 ++-- drivers/gpu/nvgpu/gm20b/ltc_gm20b.c | 8 ++--- drivers/gpu/nvgpu/vgpu/dbg_vgpu.c | 4 +-- drivers/gpu/nvgpu/vgpu/fifo_vgpu.c | 9 ++--- 37 files changed, 142 insertions(+), 127 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c index 8144ec6e..6cdbe3e1 100644 --- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c @@ -263,7 +263,7 @@ static int gk20a_as_ioctl_get_va_regions( unsigned int write_entries; struct nvgpu_as_va_region __user *user_region_ptr; struct vm_gk20a *vm = as_share->vm; - int page_sizes = gmmu_page_size_kernel; + unsigned int page_sizes = gmmu_page_size_kernel; gk20a_dbg_fn(""); diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c index 6b8af929..57b49f2c 100644 --- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c @@ -47,7 +47,7 @@ static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct gk20a *g); static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx) { - int i; + unsigned int i; for (i = 0; i < cde_ctx->num_bufs; i++) { struct mem_desc *mem = cde_ctx->mem + i; @@ -361,7 +361,8 @@ static int gk20a_cde_patch_params(struct gk20a_cde_ctx *cde_ctx) struct mem_desc *target_mem; u32 *target_mem_ptr; u64 new_data; - int user_id = 0, i, err; + int user_id = 0, err; + unsigned int i; for (i = 0; i < cde_ctx->num_params; i++) { struct gk20a_cde_hdr_param *param = cde_ctx->params + i; @@ -456,7 +457,7 @@ static int gk20a_init_cde_param(struct gk20a_cde_ctx *cde_ctx, } target_mem = cde_ctx->mem + param->target_buf; - if (target_mem->size< (param->target_byte_offset + 3)) { + if (target_mem->size < (param->target_byte_offset + 3)) { gk20a_warn(cde_ctx->dev, "cde: invalid buffer parameter. 
param idx = %d, target_buf_offs=%lld, target_buf_size=%zu", cde_ctx->num_params, param->target_byte_offset, target_mem->size); @@ -515,7 +516,7 @@ static int gk20a_init_cde_command(struct gk20a_cde_ctx *cde_ctx, { struct nvgpu_gpfifo **gpfifo, *gpfifo_elem; u32 *num_entries; - int i; + unsigned int i; /* check command type */ if (op == TYPE_BUF_COMMAND_INIT) { @@ -615,7 +616,7 @@ static int gk20a_init_cde_img(struct gk20a_cde_ctx *cde_ctx, struct gk20a_cde_hdr_elem *elem; u32 min_size = 0; int err = 0; - int i; + unsigned int i; min_size += 2 * sizeof(u32); if (img->size < min_size) { diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h index 16d6b4ef..8cdba938 100644 --- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.h @@ -67,8 +67,8 @@ struct gk20a_cde_hdr_replace { u32 source_buf; s32 shift; u32 type; - s64 target_byte_offset; - s64 source_byte_offset; + u64 target_byte_offset; + u64 source_byte_offset; u64 mask; }; @@ -113,7 +113,7 @@ struct gk20a_cde_hdr_param { s32 shift; u32 type; s64 data_offset; - s64 target_byte_offset; + u64 target_byte_offset; u64 mask; }; @@ -223,11 +223,11 @@ struct gk20a_cde_ctx { /* buf converter configuration */ struct mem_desc mem[MAX_CDE_BUFS]; - int num_bufs; + unsigned int num_bufs; /* buffer patching params (where should patching be done) */ struct gk20a_cde_hdr_param params[MAX_CDE_PARAMS]; - int num_params; + unsigned int num_params; /* storage for user space parameter values */ u32 user_param_values[MAX_CDE_USER_PARAMS]; diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c index bfd183fb..235bc027 100644 --- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c @@ -211,10 +211,10 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx) kfree(ce_ctx); } -static inline int gk20a_ce_get_method_size(int request_operation) +static inline unsigned int gk20a_ce_get_method_size(int 
request_operation) { /* failure size */ - int methodsize = ~0; + unsigned int methodsize = UINT_MAX; if (request_operation & NVGPU_CE_PHYS_MODE_TRANSFER) methodsize = 10 * 2 * sizeof(u32); @@ -518,7 +518,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev, ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_ALLOCATED; end: - if (ctx_id == ~0) { + if (ctx_id == (u32)~0) { mutex_lock(&ce_app->app_mutex); gk20a_ce_delete_gpu_context(ce_ctx); mutex_unlock(&ce_app->app_mutex); diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index 43a6df0e..8105de11 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c @@ -223,7 +223,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c) { u32 val, exp, man; u64 timeout; - int val_len; + unsigned int val_len; val = pbdma_acquire_retry_man_2_f() | pbdma_acquire_retry_exp_2_f(); @@ -238,7 +238,7 @@ u32 channel_gk20a_pbdma_acquire_val(struct channel_gk20a *c) val_len = fls(timeout >> 32) + 32; if (val_len == 32) val_len = fls(timeout); - if (val_len > 16 + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */ + if (val_len > 16U + pbdma_acquire_timeout_exp_max_v()) { /* man: 16bits */ exp = pbdma_acquire_timeout_exp_max_v(); man = pbdma_acquire_timeout_man_max_v(); } else if (val_len > 16) { @@ -1618,7 +1618,8 @@ bool channel_gk20a_is_prealloc_enabled(struct channel_gk20a *c) static int channel_gk20a_prealloc_resources(struct channel_gk20a *c, unsigned int num_jobs) { - int i, err; + unsigned int i; + int err; size_t size; struct priv_cmd_entry *entries = NULL; @@ -3044,7 +3045,7 @@ const struct file_operations gk20a_event_id_ops = { }; static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch, - int event_id, + u32 event_id, struct gk20a_event_id_data **event_id_data) { struct gk20a_event_id_data *local_event_id_data; @@ -3069,7 +3070,7 @@ static int gk20a_channel_get_event_data_from_id(struct channel_gk20a *ch, } void 
gk20a_channel_event_id_post_event(struct channel_gk20a *ch, - int event_id) + u32 event_id) { struct gk20a_event_id_data *event_id_data; int err = 0; diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h index 832e03e9..66052950 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h @@ -188,7 +188,7 @@ struct channel_gk20a { bool has_timedout; u32 timeout_ms_max; bool timeout_debug_dump; - u32 timeslice_us; + unsigned int timeslice_us; struct dma_buf *error_notifier_ref; struct nvgpu_notification *error_notifier; @@ -309,11 +309,11 @@ int gk20a_channel_get_timescale_from_timeslice(struct gk20a *g, int timeslice_period, int *__timeslice_timeout, int *__timeslice_scale); int gk20a_channel_set_priority(struct channel_gk20a *ch, u32 priority); -int gk20a_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice); +int gk20a_channel_set_timeslice(struct channel_gk20a *ch, unsigned int timeslice); int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch, u32 level); void gk20a_channel_event_id_post_event(struct channel_gk20a *ch, - int event_id); + u32 event_id); void gk20a_channel_setup_ramfc_for_privileged_channel(struct channel_gk20a *c); diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c index febea719..ba8fbc98 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c @@ -192,7 +192,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s, bool need_sync_fence) { u32 thresh; - int incr_cmd_size; + size_t incr_cmd_size; int off; int err; struct gk20a_channel_syncpt *sp = diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c index 493cbe80..56bc2c7a 100644 --- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c @@ -581,7 +581,7 @@ static inline int get_timestamps_zipper(struct gk20a *g, 
struct nvgpu_gpu_get_cpu_time_correlation_info_args *args) { int err = 0; - int i = 0; + unsigned int i = 0; u32 gpu_timestamp_hi_new = 0; u32 gpu_timestamp_hi_old = 0; diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c index 9a84e2e3..b84db933 100644 --- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c @@ -177,7 +177,7 @@ void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o) { struct fifo_gk20a *f = &g->fifo; u32 chid; - int i; + unsigned int i; struct ch_state **ch_state; diff --git a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c index db02ec7b..2fb7f64b 100644 --- a/drivers/gpu/nvgpu/gk20a/fb_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fb_gk20a.c @@ -51,12 +51,12 @@ static void gk20a_fb_set_mmu_page_size(struct gk20a *g) gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl); } -static int gk20a_fb_compression_page_size(struct gk20a *g) +static unsigned int gk20a_fb_compression_page_size(struct gk20a *g) { return SZ_128K; } -static int gk20a_fb_compressible_page_size(struct gk20a *g) +static unsigned int gk20a_fb_compressible_page_size(struct gk20a *g) { return SZ_64K; } diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c index e046152d..323caa8f 100644 --- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c @@ -114,14 +114,14 @@ int gk20a_fence_install_fd(struct gk20a_fence *f) #endif } -int gk20a_alloc_fence_pool(struct channel_gk20a *c, int count) +int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count) { int err; size_t size; struct gk20a_fence *fence_pool = NULL; size = sizeof(struct gk20a_fence); - if (count <= ULONG_MAX / size) { + if (count <= UINT_MAX / size) { size = count * size; fence_pool = vzalloc(size); } diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h index 97a7d957..beba761a 100644 --- 
a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h @@ -67,7 +67,7 @@ int gk20a_fence_from_syncpt( int gk20a_alloc_fence_pool( struct channel_gk20a *c, - int size); + unsigned int count); void gk20a_free_fence_pool( struct channel_gk20a *c); diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 9887b68f..68394da5 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c @@ -252,7 +252,7 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id) static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) { - u32 fault_id = ~0; + u32 fault_id = FIFO_INVAL_ENGINE_ID; struct fifo_engine_info_gk20a *engine_info; engine_info = gk20a_fifo_get_engine_info(g, engine_id); @@ -312,7 +312,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) u32 i; u32 max_info_entries = top_device_info__size_1_v(); u32 engine_enum = ENGINE_INVAL_GK20A; - u32 engine_id = ~0; + u32 engine_id = FIFO_INVAL_ENGINE_ID; u32 runlist_id = ~0; u32 pbdma_id = ~0; u32 intr_id = ~0; @@ -428,7 +428,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g) { u32 eng_intr_mask = 0; - int i; + unsigned int i; u32 active_engine_id = 0; u32 engine_enum = ENGINE_INVAL_GK20A; @@ -588,7 +588,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) { struct fifo_runlist_info_gk20a *runlist; struct device *d = dev_from_gk20a(g); - s32 runlist_id = -1; + unsigned int runlist_id; u32 i; size_t runlist_size; @@ -653,7 +653,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) u32 intr_stall; u32 mask; u32 timeout; - int i; + unsigned int i; struct gk20a_platform *platform = dev_get_drvdata(g->dev); u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); @@ -777,7 +777,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) { struct fifo_gk20a *f = &g->fifo; struct device *d = 
dev_from_gk20a(g); - int chid, i, err = 0; + unsigned int chid, i; + int err = 0; gk20a_dbg_fn(""); @@ -974,7 +975,7 @@ static struct channel_gk20a * channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr) { struct gk20a *g = f->g; - int ci; + unsigned int ci; if (unlikely(!f->channel)) return NULL; for (ci = 0; ci < f->num_channels; ci++) { @@ -1461,7 +1462,7 @@ static bool gk20a_fifo_handle_mmu_fault( gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached," " deferring channel recovery to channel free"); - } else if (engine_id != ~0) { + } else if (engine_id != FIFO_INVAL_ENGINE_ID) { was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex); mutex_lock(&g->fifo.gr_reset_mutex); /* if lock is already taken, a reset is taking place @@ -1565,7 +1566,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, } mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); - if (mmu_id != ~0) + if (mmu_id != FIFO_INVAL_ENGINE_ID) gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), fifo_trigger_mmu_fault_id_f(mmu_id) | fifo_trigger_mmu_fault_enable_f(1)); @@ -1595,7 +1596,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg) { - int i; + unsigned int i; u32 engines = 0; for (i = 0; i < g->fifo.num_engines; i++) { @@ -1712,7 +1713,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, for_each_set_bit(engine_id, &engine_ids, 32) { u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); - if (mmu_id != ~0) + if (mmu_id != FIFO_INVAL_ENGINE_ID) mmu_fault_engines |= BIT(mmu_id); } } else { @@ -1736,7 +1737,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); engine_ids |= BIT(active_engine_id); - if (mmu_id != ~0) + if (mmu_id != FIFO_INVAL_ENGINE_ID) mmu_fault_engines |= BIT(mmu_id); } } @@ -2063,7 +2064,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) && print_channel_reset_log; if 
(print_channel_reset_log) { - int engine_id; + unsigned int engine_id; gk20a_err(dev_from_gk20a(g), "channel reset initiated from %s; intr=0x%08x", __func__, fifo_intr); @@ -2497,7 +2498,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g, int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) { - int i; + unsigned int i; int err = 0, ret = 0; for (i = 0; i < g->fifo.num_engines; i++) { @@ -2519,7 +2520,8 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, bool wait_for_idle) { u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; - u32 pbdma_chid = ~0, engine_chid = ~0, disable; + u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; + u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable; u32 token = PMU_INVALID_MUTEX_OWNER_ID; u32 mutex_ret; u32 err = 0; @@ -2551,7 +2553,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v()) pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat); - if (pbdma_chid != ~0) { + if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { err = g->ops.fifo.preempt_channel(g, pbdma_chid); if (err) goto clean_up; @@ -2567,7 +2569,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v()) engine_chid = fifo_engine_status_next_id_v(eng_stat); - if (engine_chid != ~0 && engine_chid != pbdma_chid) { + if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) { err = g->ops.fifo.preempt_channel(g, engine_chid); if (err) goto clean_up; @@ -2591,7 +2593,7 @@ clean_up: int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, bool wait_for_idle) { - int i; + unsigned int i; int err = 0, ret = 0; u32 active_engine_id; @@ -2609,7 +2611,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, } if (err) { - while (--i >= 0) { + while (i-- != 0) { active_engine_id = g->fifo.active_engines_list[i]; err = gk20a_fifo_enable_engine_activity(g, &g->fifo.engine_info[active_engine_id]); @@ -2626,7 +2628,7 @@ static void 
gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) { struct fifo_gk20a *f = &g->fifo; u32 engines = 0; - int i; + unsigned int i; for (i = 0; i < f->num_engines; i++) { u32 active_engine_id = g->fifo.active_engines_list[i]; @@ -2852,7 +2854,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, u32 hw_chid, bool add, bool wait_for_finish) { - u32 ret = 0; + int ret = 0; struct fifo_gk20a *f = &g->fifo; struct fifo_runlist_info_gk20a *runlist = NULL; u32 *runlist_entry_base = NULL; @@ -2867,7 +2869,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, /* valid channel, add/remove it from active list. Otherwise, keep active list untouched for suspend/resume. */ - if (hw_chid != ~0) { + if (hw_chid != FIFO_INVAL_CHANNEL_ID) { ch = &f->channel[hw_chid]; if (gk20a_is_channel_marked_as_tsg(ch)) tsg = &f->tsg[ch->tsgid]; @@ -2909,7 +2911,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, goto clean_up; } - if (hw_chid != ~0 || /* add/remove a valid channel */ + if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */ add /* resume to add all channels back */) { u32 max_entries = f->num_runlist_entries; u32 *runlist_end; @@ -3055,7 +3057,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g) bool gk20a_fifo_is_engine_busy(struct gk20a *g) { - int i; + unsigned int i; for (i = 0; i < fifo_engine_status__size_1_v(); i++) { u32 status = gk20a_readl(g, fifo_engine_status_r(i)); diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h index 64bdeabb..c32142e3 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h @@ -26,7 +26,9 @@ #define MAX_RUNLIST_BUFFERS 2 -#define FIFO_INVAL_ENGINE_ID ~0 +#define FIFO_INVAL_ENGINE_ID ((u32)~0) +#define FIFO_INVAL_CHANNEL_ID ((u32)~0) +#define FIFO_INVAL_TSG_ID ((u32)~0) /* generally corresponds to the "pbdma" engine */ @@ -96,11 +98,11 @@ struct 
fifo_engine_info_gk20a { struct fifo_gk20a { struct gk20a *g; - int num_channels; - int runlist_entry_size; - int num_runlist_entries; + unsigned int num_channels; + unsigned int runlist_entry_size; + unsigned int num_runlist_entries; - int num_pbdma; + unsigned int num_pbdma; u32 *pbdma_map; struct fifo_engine_info_gk20a *engine_info; @@ -114,7 +116,7 @@ struct fifo_gk20a { struct mem_desc userd; u32 userd_entry_size; - int used_channels; + unsigned int used_channels; struct channel_gk20a *channel; /* zero-kref'd channels here */ struct list_head free_chs; diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index ffddebe7..56b05e94 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h @@ -322,8 +322,8 @@ struct gpu_ops { void (*init_kind_attr)(struct gk20a *g); void (*set_mmu_page_size)(struct gk20a *g); bool (*set_use_full_comp_tag_line)(struct gk20a *g); - int (*compression_page_size)(struct gk20a *g); - int (*compressible_page_size)(struct gk20a *g); + unsigned int (*compression_page_size)(struct gk20a *g); + unsigned int (*compressible_page_size)(struct gk20a *g); void (*dump_vpr_wpr_info)(struct gk20a *g); } fb; struct { diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c index ab0fbc64..06c33a8c 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c @@ -842,7 +842,7 @@ static const struct gk20a_allocator_ops page_ops = { static int gk20a_page_alloc_init_slabs(struct gk20a_page_allocator *a) { size_t nr_slabs = ilog2(a->page_size >> 12); - int i; + unsigned int i; a->slabs = kcalloc(nr_slabs, sizeof(struct page_alloc_slab), diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index e6103479..ee8b3b63 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c @@ -124,7 +124,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g, void 
gk20a_fecs_dump_falcon_stats(struct gk20a *g) { - int i; + unsigned int i; gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d", gk20a_readl(g, gr_fecs_os_r())); @@ -1395,9 +1395,9 @@ int gr_gk20a_init_fs_state(struct gk20a *g) fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0); if (g->tpc_fs_mask_user && - fuse_tpc_mask == (0x1 << gr->max_tpc_count) - 1) { + fuse_tpc_mask == (0x1U << gr->max_tpc_count) - 1U) { u32 val = g->tpc_fs_mask_user; - val &= (0x1 << gr->max_tpc_count) - 1; + val &= (0x1U << gr->max_tpc_count) - 1U; gk20a_writel(g, gr_cwd_fs_r(), gr_cwd_fs_num_gpcs_f(gr->gpc_count) | gr_cwd_fs_num_tpcs_f(hweight32(val))); @@ -1444,7 +1444,7 @@ static u32 gk20a_init_sw_bundle(struct gk20a *g) struct av_list_gk20a *sw_bundle_init = &g->gr.ctx_vars.sw_bundle_init; u32 last_bundle_data = 0; u32 err = 0; - int i; + unsigned int i; unsigned long end_jiffies = jiffies + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)); @@ -2110,7 +2110,7 @@ static int gr_gk20a_copy_ctxsw_ucode_segments( u32 *bootimage, u32 *code, u32 *data) { - int i; + unsigned int i; gk20a_mem_wr_n(g, dst, segments->boot.offset, bootimage, segments->boot.size); @@ -4048,7 +4048,8 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr, static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) { - int i, ret; + unsigned int i; + int ret; for (i = 0; i < gr->max_used_color_index; i++) { struct zbc_color_table *c_tbl = &gr->zbc_col_tbl[i]; @@ -4898,7 +4899,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g) DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size, PAGE_SIZE); u32 *whitelist = NULL; - int num_entries = 0; + unsigned int num_entries = 0; if (gk20a_mem_begin(g, mem)) { gk20a_err(dev_from_gk20a(g), @@ -6996,7 +6997,7 @@ static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset) gk20a_writel(g, gpc_tpc_addr, reg); } -#define ILLEGAL_ID (~0) +#define ILLEGAL_ID ((u32)~0) static inline bool check_main_image_header_magic(u8 *context) { @@ -8762,7 +8763,8 @@ int 
gr_gk20a_set_sm_debug_mode(struct gk20a *g, struct channel_gk20a *ch, u64 sms, bool enable) { struct nvgpu_dbg_gpu_reg_op *ops; - int i = 0, sm_id, err; + unsigned int i = 0, sm_id; + int err; u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h index d03f945c..662d9a87 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h @@ -334,8 +334,8 @@ struct gr_gk20a { s32 max_default_color_index; s32 max_default_depth_index; - s32 max_used_color_index; - s32 max_used_depth_index; + u32 max_used_color_index; + u32 max_used_depth_index; #define GR_CHANNEL_MAP_TLB_SIZE 2 /* must of power of 2 */ struct gr_channel_map_tlb_entry chid_tlb[GR_CHANNEL_MAP_TLB_SIZE]; diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 9906b77b..fcd5d664 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c @@ -789,10 +789,10 @@ static void gk20a_remove_mm_ce_support(struct mm_gk20a *mm) struct gk20a *g = gk20a_from_mm(mm); struct gk20a_platform *platform = gk20a_get_platform(g->dev); - if (mm->vidmem.ce_ctx_id != ~0) + if (mm->vidmem.ce_ctx_id != (u32)~0) gk20a_ce_delete_context(g->dev, mm->vidmem.ce_ctx_id); - mm->vidmem.ce_ctx_id = ~0; + mm->vidmem.ce_ctx_id = (u32)~0; if (platform->has_ce) gk20a_vm_remove_support_nofree(&mm->ce.vm); @@ -836,7 +836,7 @@ static int gk20a_vidmem_clear_all(struct gk20a *g) u64 region2_base = 0; int err = 0; - if (mm->vidmem.ce_ctx_id == ~0) + if (mm->vidmem.ce_ctx_id == (u32)~0) return -EINVAL; err = gk20a_ce_execute_ops(g->dev, @@ -989,7 +989,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g) gk20a_init_pramin(mm); - mm->vidmem.ce_ctx_id = ~0; + mm->vidmem.ce_ctx_id = (u32)~0; err = gk20a_init_vidmem(mm); if (err) @@ -1119,7 +1119,7 @@ int gk20a_init_mm_support(struct gk20a *g) void 
gk20a_init_mm_ce_context(struct gk20a *g) { #if defined(CONFIG_GK20A_VIDMEM) - if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == ~0)) { + if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == (u32)~0)) { g->mm.vidmem.ce_ctx_id = gk20a_ce_create_context_with_cb(g->dev, gk20a_fifo_get_fast_ce_runlist_id(g), @@ -1128,7 +1128,7 @@ void gk20a_init_mm_ce_context(struct gk20a *g) -1, NULL); - if (g->mm.vidmem.ce_ctx_id == ~0) + if (g->mm.vidmem.ce_ctx_id == (u32)~0) gk20a_err(g->dev, "Failed to allocate CE context for vidmem page clearing support"); } @@ -3021,7 +3021,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem) struct page_alloc_chunk *chunk = NULL; int err = 0; - if (g->mm.vidmem.ce_ctx_id == ~0) + if (g->mm.vidmem.ce_ctx_id == (u32)~0) return -EINVAL; alloc = get_vidmem_page_alloc(mem->sgt->sgl); diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h index f13a11ea..3d5cd1b2 100644 --- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h @@ -56,7 +56,7 @@ struct gk20a_platform { bool has_syncpoints; /* channel limit after which to start aggressive sync destroy */ - int aggressive_sync_destroy_thresh; + unsigned int aggressive_sync_destroy_thresh; /* flag to set sync destroy aggressiveness */ bool aggressive_sync_destroy; diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c index 35d524f1..01ff5f96 100644 --- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c +++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c @@ -58,8 +58,8 @@ extern struct device tegra_vpr_dev; struct gk20a_emc_params { - long bw_ratio; - long freq_last_set; + unsigned long bw_ratio; + unsigned long freq_last_set; }; static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); @@ -217,7 +217,7 @@ static void gk20a_tegra_postscale(struct device *dev, struct clk *emc_clk = platform->clk[2]; enum tegra_chipid chip_id = tegra_get_chip_id(); 
unsigned long emc_target; - long emc_freq_lower, emc_freq_upper, emc_freq_rounded; + unsigned long emc_freq_lower, emc_freq_upper, emc_freq_rounded; emc_target = gk20a_tegra_get_emc_rate(g, emc_params); @@ -234,8 +234,10 @@ static void gk20a_tegra_postscale(struct device *dev, break; case TEGRA_CHIPID_TEGRA21: - emc_freq_lower = tegra_emc_round_rate_updown(emc_target, false); - emc_freq_upper = tegra_emc_round_rate_updown(emc_target, true); + emc_freq_lower = (unsigned long) + tegra_emc_round_rate_updown(emc_target, false); + emc_freq_upper = (unsigned long) + tegra_emc_round_rate_updown(emc_target, true); /* round to the nearest frequency step */ if (emc_target < (emc_freq_lower + emc_freq_upper) / 2) @@ -645,7 +647,7 @@ static int gk20a_tegra_get_clocks(struct device *dev) { struct gk20a_platform *platform = dev_get_drvdata(dev); char devname[16]; - int i; + unsigned int i; int ret = 0; BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks)); diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index bfa3902e..193938ba 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c @@ -2709,7 +2709,7 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu, { u32 head, tail; bool rewind = false; - int free; + unsigned int free; size = ALIGN(size, QUEUE_ALIGNMENT); @@ -2955,7 +2955,8 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g) struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; struct device *d = dev_from_gk20a(g); - int i, err = 0; + unsigned int i; + int err = 0; u8 *ptr; gk20a_dbg_fn(""); @@ -4128,7 +4129,7 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu) void pmu_dump_falcon_stats(struct pmu_gk20a *pmu) { struct gk20a *g = gk20a_from_pmu(pmu); - int i; + unsigned int i; gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d", gk20a_readl(g, pwr_falcon_os_r())); diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index 
8b87b523..b0754070 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c @@ -35,7 +35,7 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem) if (key < prange->base) return -1; else if (prange->base <= key && key < (prange->base + - (prange->count * 4))) + (prange->count * 4U))) return 0; return 1; } @@ -379,7 +379,8 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, struct nvgpu_dbg_gpu_reg_op *ops, u64 num_ops) { - int err = 0, i; + int err = 0; + unsigned int i; struct channel_gk20a *ch = NULL; struct gk20a *g = dbg_s->g; /*struct gr_gk20a *gr = &g->gr;*/ @@ -799,7 +800,8 @@ static int gk20a_apply_smpc_war(struct dbg_session_gk20a *dbg_s) * it was already swapped out in/out once or not, etc. */ struct nvgpu_dbg_gpu_reg_op ops[4]; - int i; + unsigned int i; + for (i = 0; i < ARRAY_SIZE(ops); i++) { ops[i].op = REGOP(WRITE_32); ops[i].type = REGOP(TYPE_GR_CTX); diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c index 3d7e8bd7..a60be7ef 100644 --- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c @@ -140,7 +140,7 @@ static int gk20a_sched_dev_ioctl_get_tsgs_by_pid(struct gk20a_sched_ctrl *sched, struct fifo_gk20a *f = &sched->g->fifo; struct tsg_gk20a *tsg; u64 *bitmap; - int tsgid; + unsigned int tsgid; /* pid at user level corresponds to kernel tgid */ pid_t tgid = (pid_t)arg->pid; int err = 0; @@ -492,7 +492,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp) struct gk20a *g = sched->g; struct fifo_gk20a *f = &g->fifo; struct tsg_gk20a *tsg; - int tsgid; + unsigned int tsgid; gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "sched: %p", sched); diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h index d96037ce..c73d3c05 100644 --- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h @@ -222,7 +222,7 @@ static inline bool 
gk20a_semaphore_is_released(struct gk20a_semaphore *s) * the value of the semaphore then the semaphore has been signaled * (a.k.a. released). */ - return sema_val >= atomic_read(&s->value); + return (int)sema_val >= atomic_read(&s->value); } static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s) @@ -240,12 +240,12 @@ static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s) static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s) { - return atomic_read(&s->value); + return (u32)atomic_read(&s->value); } static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s) { - return atomic_read(&s->hw_sema->next_value); + return (u32)atomic_read(&s->hw_sema->next_value); } /* diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c index af6af70e..b642981c 100644 --- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c @@ -445,7 +445,7 @@ static int gk20a_sync_fill_driver_data(struct sync_pt *sync_pt, { struct gk20a_sync_pt_info info; - if (size < sizeof(info)) + if (size < (int)sizeof(info)) return -ENOMEM; info.hw_op_ns = ktime_to_ns(gk20a_sync_pt_duration(sync_pt)); diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 133b737e..43ee79cd 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c @@ -65,7 +65,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) { struct fifo_gk20a *f = &g->fifo; struct fifo_runlist_info_gk20a *runlist; - int i; + unsigned int i; for (i = 0; i < f->max_runlists; ++i) { runlist = &f->runlist_info[i]; @@ -112,7 +112,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, ch->tsgid = tsg->tsgid; /* all the channel part of TSG should need to be same runlist_id */ - if (tsg->runlist_id == ~0) + if (tsg->runlist_id == FIFO_INVAL_TSG_ID) tsg->runlist_id = ch->runlist_id; else if (tsg->runlist_id != ch->runlist_id) { 
gk20a_err(dev_from_gk20a(tsg->g), @@ -154,7 +154,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) { struct tsg_gk20a *tsg = NULL; - if (tsgid < 0 || tsgid >= g->fifo.num_channels) + if (tsgid >= g->fifo.num_channels) return -EINVAL; tsg = &g->fifo.tsg[tsgid]; @@ -198,7 +198,7 @@ static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg, } static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg, - int event_id, + unsigned int event_id, struct gk20a_event_id_data **event_id_data) { struct gk20a_event_id_data *local_event_id_data; @@ -383,7 +383,7 @@ static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f) { struct tsg_gk20a *tsg = NULL; - int tsgid; + unsigned int tsgid; mutex_lock(&f->tsg_inuse_mutex); for (tsgid = 0; tsgid < f->num_channels; tsgid++) { diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h index e1960102..dbfb068b 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h @@ -43,9 +43,9 @@ struct tsg_gk20a { int num_active_channels; struct mutex ch_list_lock; - int timeslice_us; - int timeslice_timeout; - int timeslice_scale; + unsigned int timeslice_us; + unsigned int timeslice_timeout; + unsigned int timeslice_scale; struct gr_ctx_desc *tsg_gr_ctx; diff --git a/drivers/gpu/nvgpu/gm206/acr_gm206.c b/drivers/gpu/nvgpu/gm206/acr_gm206.c index 872ff601..a0e60833 100644 --- a/drivers/gpu/nvgpu/gm206/acr_gm206.c +++ b/drivers/gpu/nvgpu/gm206/acr_gm206.c @@ -212,7 +212,8 @@ static int gm206_bootstrap_hs_flcn(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; struct vm_gk20a *vm = &mm->pmu.vm; - int i, err = 0; + unsigned int i; + int err = 0; u64 *acr_dmem; u32 img_size_in_bytes = 0; u32 status; diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.c b/drivers/gpu/nvgpu/gm206/bios_gm206.c index 033c84d6..1f3de0b7 100644 --- a/drivers/gpu/nvgpu/gm206/bios_gm206.c +++ 
b/drivers/gpu/nvgpu/gm206/bios_gm206.c @@ -830,7 +830,7 @@ static int gm206_bios_preos(struct gk20a *g) static int gm206_bios_init(struct gk20a *g) { - int i; + unsigned int i; struct gk20a_platform *platform = dev_get_drvdata(g->dev); struct dentry *d; const struct firmware *bios_fw; diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index f9e2d477..1d0379c5 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c @@ -1185,7 +1185,7 @@ int acr_ucode_patch_sig(struct gk20a *g, unsigned int *p_patch_loc, unsigned int *p_patch_ind) { - int i, *p_sig; + unsigned int i, *p_sig; gm20b_dbg_pmu(""); if (!pmu_is_debug_mode_en(g)) { diff --git a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c index c65cd450..ecc1d0d5 100644 --- a/drivers/gpu/nvgpu/gm20b/fb_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fb_gm20b.c @@ -101,12 +101,12 @@ static bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g) return true; } -static int gm20b_fb_compression_page_size(struct gk20a *g) +static unsigned int gm20b_fb_compression_page_size(struct gk20a *g) { return SZ_128K; } -static int gm20b_fb_compressible_page_size(struct gk20a *g) +static unsigned int gm20b_fb_compressible_page_size(struct gk20a *g) { return SZ_64K; } diff --git a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c index 3b877db1..bb38a6f1 100644 --- a/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/fifo_gm20b.c @@ -80,7 +80,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g, } else { u32 mmu_id = gm20b_engine_id_to_mmu_id(g, engine_id); - if (mmu_id != ~0) + if (mmu_id != (u32)~0) gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id), fifo_trigger_mmu_fault_enable_f(1)); } diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c index 56812fa6..cff0774d 100644 --- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c @@ -548,11 +548,11 
@@ static void gr_gm20b_load_tpc_mask(struct gk20a *g) fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0); if (g->tpc_fs_mask_user && g->tpc_fs_mask_user != fuse_tpc_mask && - fuse_tpc_mask == (0x1 << g->gr.max_tpc_count) - 1) { + fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) { u32 val = g->tpc_fs_mask_user; - val &= (0x1 << g->gr.max_tpc_count) - 1; + val &= (0x1U << g->gr.max_tpc_count) - 1U; /* skip tpc to disable the other tpc cause channel timeout */ - val = (0x1 << hweight32(val)) - 1; + val = (0x1U << hweight32(val)) - 1U; gk20a_writel(g, gr_fe_tpc_fs_r(), val); } else { gk20a_writel(g, gr_fe_tpc_fs_r(), pes_tpc_mask); diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c index fee9a807..6cb238b7 100644 --- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c @@ -198,7 +198,7 @@ void gm20b_ltc_init_fs_state(struct gk20a *g) void gm20b_ltc_isr(struct gk20a *g) { u32 mc_intr, ltc_intr; - int ltc, slice; + unsigned int ltc, slice; u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); @@ -227,8 +227,8 @@ void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g) u32 data; bool done[g->ltc_count]; s32 retry = 100; - int i; - int num_done = 0; + unsigned int i; + unsigned int num_done = 0; u32 ltc_d = ltc_ltc1_ltss_g_elpg_r() - ltc_ltc0_ltss_g_elpg_r(); gk20a_dbg_fn(""); @@ -289,7 +289,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base) void gm20b_flush_ltc(struct gk20a *g) { unsigned long timeout; - int ltc; + unsigned int ltc; u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); #define __timeout_init() \ diff --git a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c index c312c419..609b497a 100644 --- a/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/dbg_vgpu.c @@ -30,9 +30,9 @@ static int vgpu_exec_regops(struct dbg_session_gk20a *dbg_s, struct tegra_vgpu_cmd_msg msg; struct 
tegra_vgpu_reg_ops_params *p = &msg.params.reg_ops; void *oob; - size_t oob_size; + size_t oob_size, ops_size; void *handle = NULL; - int ops_size, err = 0; + int err = 0; gk20a_dbg_fn(""); BUG_ON(sizeof(*ops) != sizeof(struct tegra_vgpu_reg_op)); diff --git a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c index efeeb8ee..c3669990 100644 --- a/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/fifo_vgpu.c @@ -184,7 +184,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) { struct fifo_runlist_info_gk20a *runlist; struct device *d = dev_from_gk20a(g); - s32 runlist_id = -1; + unsigned int runlist_id = -1; u32 i; u64 runlist_size; @@ -238,7 +238,8 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g) struct fifo_gk20a *f = &g->fifo; struct device *d = dev_from_gk20a(g); struct vgpu_priv_data *priv = vgpu_get_priv_data(g); - int chid, err = 0; + unsigned int chid; + int err = 0; gk20a_dbg_fn(""); @@ -486,7 +487,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, /* valid channel, add/remove it from active list. Otherwise, keep active list untouched for suspend/resume. */ - if (hw_chid != ~0) { + if (hw_chid != (u32)~0) { if (add) { if (test_and_set_bit(hw_chid, runlist->active_channels) == 1) @@ -498,7 +499,7 @@ static int vgpu_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, } } - if (hw_chid != ~0 || /* add/remove a valid channel */ + if (hw_chid != (u32)~0 || /* add/remove a valid channel */ add /* resume to add all channels back */) { u32 chid; -- cgit v1.2.2