diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2016-11-09 18:53:16 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2016-11-17 00:35:36 -0500 |
commit | d29afd2c9e990799b470bb95a97935cf5b5020db (patch) | |
tree | 48f14dffe90956e9b42b02a87b95fd7a1dfeef3e /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |
parent | 5494e846c78dd0da74635905ead3abe45502375f (diff) |
gpu: nvgpu: Fix signed comparison bugs
Fix small problems related to signed versus unsigned comparisons
throughout the driver. Bump up the warning level to prevent such
problems from occurring in future.
Change-Id: I8ff5efb419f664e8a2aedadd6515ae4d18502ae0
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1252068
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 50 |
1 file changed, 26 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index 9887b68f..68394da5 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -252,7 +252,7 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id) | |||
252 | 252 | ||
253 | static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) | 253 | static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) |
254 | { | 254 | { |
255 | u32 fault_id = ~0; | 255 | u32 fault_id = FIFO_INVAL_ENGINE_ID; |
256 | struct fifo_engine_info_gk20a *engine_info; | 256 | struct fifo_engine_info_gk20a *engine_info; |
257 | 257 | ||
258 | engine_info = gk20a_fifo_get_engine_info(g, engine_id); | 258 | engine_info = gk20a_fifo_get_engine_info(g, engine_id); |
@@ -312,7 +312,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
312 | u32 i; | 312 | u32 i; |
313 | u32 max_info_entries = top_device_info__size_1_v(); | 313 | u32 max_info_entries = top_device_info__size_1_v(); |
314 | u32 engine_enum = ENGINE_INVAL_GK20A; | 314 | u32 engine_enum = ENGINE_INVAL_GK20A; |
315 | u32 engine_id = ~0; | 315 | u32 engine_id = FIFO_INVAL_ENGINE_ID; |
316 | u32 runlist_id = ~0; | 316 | u32 runlist_id = ~0; |
317 | u32 pbdma_id = ~0; | 317 | u32 pbdma_id = ~0; |
318 | u32 intr_id = ~0; | 318 | u32 intr_id = ~0; |
@@ -428,7 +428,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f) | |||
428 | u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g) | 428 | u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g) |
429 | { | 429 | { |
430 | u32 eng_intr_mask = 0; | 430 | u32 eng_intr_mask = 0; |
431 | int i; | 431 | unsigned int i; |
432 | u32 active_engine_id = 0; | 432 | u32 active_engine_id = 0; |
433 | u32 engine_enum = ENGINE_INVAL_GK20A; | 433 | u32 engine_enum = ENGINE_INVAL_GK20A; |
434 | 434 | ||
@@ -588,7 +588,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f) | |||
588 | { | 588 | { |
589 | struct fifo_runlist_info_gk20a *runlist; | 589 | struct fifo_runlist_info_gk20a *runlist; |
590 | struct device *d = dev_from_gk20a(g); | 590 | struct device *d = dev_from_gk20a(g); |
591 | s32 runlist_id = -1; | 591 | unsigned int runlist_id; |
592 | u32 i; | 592 | u32 i; |
593 | size_t runlist_size; | 593 | size_t runlist_size; |
594 | 594 | ||
@@ -653,7 +653,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g) | |||
653 | u32 intr_stall; | 653 | u32 intr_stall; |
654 | u32 mask; | 654 | u32 mask; |
655 | u32 timeout; | 655 | u32 timeout; |
656 | int i; | 656 | unsigned int i; |
657 | struct gk20a_platform *platform = dev_get_drvdata(g->dev); | 657 | struct gk20a_platform *platform = dev_get_drvdata(g->dev); |
658 | u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); | 658 | u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); |
659 | 659 | ||
@@ -777,7 +777,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g) | |||
777 | { | 777 | { |
778 | struct fifo_gk20a *f = &g->fifo; | 778 | struct fifo_gk20a *f = &g->fifo; |
779 | struct device *d = dev_from_gk20a(g); | 779 | struct device *d = dev_from_gk20a(g); |
780 | int chid, i, err = 0; | 780 | unsigned int chid, i; |
781 | int err = 0; | ||
781 | 782 | ||
782 | gk20a_dbg_fn(""); | 783 | gk20a_dbg_fn(""); |
783 | 784 | ||
@@ -974,7 +975,7 @@ static struct channel_gk20a * | |||
974 | channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr) | 975 | channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr) |
975 | { | 976 | { |
976 | struct gk20a *g = f->g; | 977 | struct gk20a *g = f->g; |
977 | int ci; | 978 | unsigned int ci; |
978 | if (unlikely(!f->channel)) | 979 | if (unlikely(!f->channel)) |
979 | return NULL; | 980 | return NULL; |
980 | for (ci = 0; ci < f->num_channels; ci++) { | 981 | for (ci = 0; ci < f->num_channels; ci++) { |
@@ -1461,7 +1462,7 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
1461 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, | 1462 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, |
1462 | "sm debugger attached," | 1463 | "sm debugger attached," |
1463 | " deferring channel recovery to channel free"); | 1464 | " deferring channel recovery to channel free"); |
1464 | } else if (engine_id != ~0) { | 1465 | } else if (engine_id != FIFO_INVAL_ENGINE_ID) { |
1465 | was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex); | 1466 | was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex); |
1466 | mutex_lock(&g->fifo.gr_reset_mutex); | 1467 | mutex_lock(&g->fifo.gr_reset_mutex); |
1467 | /* if lock is already taken, a reset is taking place | 1468 | /* if lock is already taken, a reset is taking place |
@@ -1565,7 +1566,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1565 | } | 1566 | } |
1566 | 1567 | ||
1567 | mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); | 1568 | mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); |
1568 | if (mmu_id != ~0) | 1569 | if (mmu_id != FIFO_INVAL_ENGINE_ID) |
1569 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), | 1570 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), |
1570 | fifo_trigger_mmu_fault_id_f(mmu_id) | | 1571 | fifo_trigger_mmu_fault_id_f(mmu_id) | |
1571 | fifo_trigger_mmu_fault_enable_f(1)); | 1572 | fifo_trigger_mmu_fault_enable_f(1)); |
@@ -1595,7 +1596,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1595 | 1596 | ||
1596 | static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg) | 1597 | static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg) |
1597 | { | 1598 | { |
1598 | int i; | 1599 | unsigned int i; |
1599 | u32 engines = 0; | 1600 | u32 engines = 0; |
1600 | 1601 | ||
1601 | for (i = 0; i < g->fifo.num_engines; i++) { | 1602 | for (i = 0; i < g->fifo.num_engines; i++) { |
@@ -1712,7 +1713,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1712 | for_each_set_bit(engine_id, &engine_ids, 32) { | 1713 | for_each_set_bit(engine_id, &engine_ids, 32) { |
1713 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); | 1714 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); |
1714 | 1715 | ||
1715 | if (mmu_id != ~0) | 1716 | if (mmu_id != FIFO_INVAL_ENGINE_ID) |
1716 | mmu_fault_engines |= BIT(mmu_id); | 1717 | mmu_fault_engines |= BIT(mmu_id); |
1717 | } | 1718 | } |
1718 | } else { | 1719 | } else { |
@@ -1736,7 +1737,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1736 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); | 1737 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); |
1737 | 1738 | ||
1738 | engine_ids |= BIT(active_engine_id); | 1739 | engine_ids |= BIT(active_engine_id); |
1739 | if (mmu_id != ~0) | 1740 | if (mmu_id != FIFO_INVAL_ENGINE_ID) |
1740 | mmu_fault_engines |= BIT(mmu_id); | 1741 | mmu_fault_engines |= BIT(mmu_id); |
1741 | } | 1742 | } |
1742 | } | 1743 | } |
@@ -2063,7 +2064,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr) | |||
2063 | && print_channel_reset_log; | 2064 | && print_channel_reset_log; |
2064 | 2065 | ||
2065 | if (print_channel_reset_log) { | 2066 | if (print_channel_reset_log) { |
2066 | int engine_id; | 2067 | unsigned int engine_id; |
2067 | gk20a_err(dev_from_gk20a(g), | 2068 | gk20a_err(dev_from_gk20a(g), |
2068 | "channel reset initiated from %s; intr=0x%08x", | 2069 | "channel reset initiated from %s; intr=0x%08x", |
2069 | __func__, fifo_intr); | 2070 | __func__, fifo_intr); |
@@ -2497,7 +2498,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g, | |||
2497 | 2498 | ||
2498 | int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) | 2499 | int gk20a_fifo_enable_all_engine_activity(struct gk20a *g) |
2499 | { | 2500 | { |
2500 | int i; | 2501 | unsigned int i; |
2501 | int err = 0, ret = 0; | 2502 | int err = 0, ret = 0; |
2502 | 2503 | ||
2503 | for (i = 0; i < g->fifo.num_engines; i++) { | 2504 | for (i = 0; i < g->fifo.num_engines; i++) { |
@@ -2519,7 +2520,8 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, | |||
2519 | bool wait_for_idle) | 2520 | bool wait_for_idle) |
2520 | { | 2521 | { |
2521 | u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; | 2522 | u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat; |
2522 | u32 pbdma_chid = ~0, engine_chid = ~0, disable; | 2523 | u32 pbdma_chid = FIFO_INVAL_CHANNEL_ID; |
2524 | u32 engine_chid = FIFO_INVAL_CHANNEL_ID, disable; | ||
2523 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; | 2525 | u32 token = PMU_INVALID_MUTEX_OWNER_ID; |
2524 | u32 mutex_ret; | 2526 | u32 mutex_ret; |
2525 | u32 err = 0; | 2527 | u32 err = 0; |
@@ -2551,7 +2553,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, | |||
2551 | chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v()) | 2553 | chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v()) |
2552 | pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat); | 2554 | pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat); |
2553 | 2555 | ||
2554 | if (pbdma_chid != ~0) { | 2556 | if (pbdma_chid != FIFO_INVAL_CHANNEL_ID) { |
2555 | err = g->ops.fifo.preempt_channel(g, pbdma_chid); | 2557 | err = g->ops.fifo.preempt_channel(g, pbdma_chid); |
2556 | if (err) | 2558 | if (err) |
2557 | goto clean_up; | 2559 | goto clean_up; |
@@ -2567,7 +2569,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g, | |||
2567 | ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v()) | 2569 | ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v()) |
2568 | engine_chid = fifo_engine_status_next_id_v(eng_stat); | 2570 | engine_chid = fifo_engine_status_next_id_v(eng_stat); |
2569 | 2571 | ||
2570 | if (engine_chid != ~0 && engine_chid != pbdma_chid) { | 2572 | if (engine_chid != FIFO_INVAL_ENGINE_ID && engine_chid != pbdma_chid) { |
2571 | err = g->ops.fifo.preempt_channel(g, engine_chid); | 2573 | err = g->ops.fifo.preempt_channel(g, engine_chid); |
2572 | if (err) | 2574 | if (err) |
2573 | goto clean_up; | 2575 | goto clean_up; |
@@ -2591,7 +2593,7 @@ clean_up: | |||
2591 | int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, | 2593 | int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, |
2592 | bool wait_for_idle) | 2594 | bool wait_for_idle) |
2593 | { | 2595 | { |
2594 | int i; | 2596 | unsigned int i; |
2595 | int err = 0, ret = 0; | 2597 | int err = 0, ret = 0; |
2596 | u32 active_engine_id; | 2598 | u32 active_engine_id; |
2597 | 2599 | ||
@@ -2609,7 +2611,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g, | |||
2609 | } | 2611 | } |
2610 | 2612 | ||
2611 | if (err) { | 2613 | if (err) { |
2612 | while (--i >= 0) { | 2614 | while (i-- != 0) { |
2613 | active_engine_id = g->fifo.active_engines_list[i]; | 2615 | active_engine_id = g->fifo.active_engines_list[i]; |
2614 | err = gk20a_fifo_enable_engine_activity(g, | 2616 | err = gk20a_fifo_enable_engine_activity(g, |
2615 | &g->fifo.engine_info[active_engine_id]); | 2617 | &g->fifo.engine_info[active_engine_id]); |
@@ -2626,7 +2628,7 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id) | |||
2626 | { | 2628 | { |
2627 | struct fifo_gk20a *f = &g->fifo; | 2629 | struct fifo_gk20a *f = &g->fifo; |
2628 | u32 engines = 0; | 2630 | u32 engines = 0; |
2629 | int i; | 2631 | unsigned int i; |
2630 | 2632 | ||
2631 | for (i = 0; i < f->num_engines; i++) { | 2633 | for (i = 0; i < f->num_engines; i++) { |
2632 | u32 active_engine_id = g->fifo.active_engines_list[i]; | 2634 | u32 active_engine_id = g->fifo.active_engines_list[i]; |
@@ -2852,7 +2854,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
2852 | u32 hw_chid, bool add, | 2854 | u32 hw_chid, bool add, |
2853 | bool wait_for_finish) | 2855 | bool wait_for_finish) |
2854 | { | 2856 | { |
2855 | u32 ret = 0; | 2857 | int ret = 0; |
2856 | struct fifo_gk20a *f = &g->fifo; | 2858 | struct fifo_gk20a *f = &g->fifo; |
2857 | struct fifo_runlist_info_gk20a *runlist = NULL; | 2859 | struct fifo_runlist_info_gk20a *runlist = NULL; |
2858 | u32 *runlist_entry_base = NULL; | 2860 | u32 *runlist_entry_base = NULL; |
@@ -2867,7 +2869,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
2867 | 2869 | ||
2868 | /* valid channel, add/remove it from active list. | 2870 | /* valid channel, add/remove it from active list. |
2869 | Otherwise, keep active list untouched for suspend/resume. */ | 2871 | Otherwise, keep active list untouched for suspend/resume. */ |
2870 | if (hw_chid != ~0) { | 2872 | if (hw_chid != FIFO_INVAL_CHANNEL_ID) { |
2871 | ch = &f->channel[hw_chid]; | 2873 | ch = &f->channel[hw_chid]; |
2872 | if (gk20a_is_channel_marked_as_tsg(ch)) | 2874 | if (gk20a_is_channel_marked_as_tsg(ch)) |
2873 | tsg = &f->tsg[ch->tsgid]; | 2875 | tsg = &f->tsg[ch->tsgid]; |
@@ -2909,7 +2911,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
2909 | goto clean_up; | 2911 | goto clean_up; |
2910 | } | 2912 | } |
2911 | 2913 | ||
2912 | if (hw_chid != ~0 || /* add/remove a valid channel */ | 2914 | if (hw_chid != FIFO_INVAL_CHANNEL_ID || /* add/remove a valid channel */ |
2913 | add /* resume to add all channels back */) { | 2915 | add /* resume to add all channels back */) { |
2914 | u32 max_entries = f->num_runlist_entries; | 2916 | u32 max_entries = f->num_runlist_entries; |
2915 | u32 *runlist_end; | 2917 | u32 *runlist_end; |
@@ -3055,7 +3057,7 @@ bool gk20a_fifo_mmu_fault_pending(struct gk20a *g) | |||
3055 | 3057 | ||
3056 | bool gk20a_fifo_is_engine_busy(struct gk20a *g) | 3058 | bool gk20a_fifo_is_engine_busy(struct gk20a *g) |
3057 | { | 3059 | { |
3058 | int i; | 3060 | unsigned int i; |
3059 | 3061 | ||
3060 | for (i = 0; i < fifo_engine_status__size_1_v(); i++) { | 3062 | for (i = 0; i < fifo_engine_status__size_1_v(); i++) { |
3061 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); | 3063 | u32 status = gk20a_readl(g, fifo_engine_status_r(i)); |