summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2016-11-17 16:49:53 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2016-12-05 19:16:24 -0500
commit21094783114b9314d57f412196544a34b3a40f4a (patch)
treea5e161de861f92dd11d1809ffcf1b6b1a540e0da /drivers
parent4dc977e25f4b23e188f8459157df4dd9a7fb0ced (diff)
gpu: nvgpu: Use timeout retry API in mm_gk20a.c
Use the retry API that is part of the nvgpu timeout API to keep track of retry attempts in the various flushing and invalidating operations in the MM code. Bug 1799159 Change-Id: I36e98d37183a13d7c3183262629f8569f64fe4d7 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1255866 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/gk20a/mm_gk20a.c71
1 file changed, 34 insertions, 37 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 4c5deef5..7ecef39b 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -30,8 +30,11 @@
30#include <linux/fdtable.h> 30#include <linux/fdtable.h>
31#include <uapi/linux/nvgpu.h> 31#include <uapi/linux/nvgpu.h>
32#include <trace/events/gk20a.h> 32#include <trace/events/gk20a.h>
33
33#include <gk20a/page_allocator_priv.h> 34#include <gk20a/page_allocator_priv.h>
34 35
36#include <nvgpu/timers.h>
37
35#include "gk20a.h" 38#include "gk20a.h"
36#include "mm_gk20a.h" 39#include "mm_gk20a.h"
37#include "fence_gk20a.h" 40#include "fence_gk20a.h"
@@ -5011,8 +5014,8 @@ int gk20a_mm_fb_flush(struct gk20a *g)
5011{ 5014{
5012 struct mm_gk20a *mm = &g->mm; 5015 struct mm_gk20a *mm = &g->mm;
5013 u32 data; 5016 u32 data;
5014 s32 retry = 100;
5015 int ret = 0; 5017 int ret = 0;
5018 struct nvgpu_timeout timeout;
5016 5019
5017 gk20a_dbg_fn(""); 5020 gk20a_dbg_fn("");
5018 5021
@@ -5022,6 +5025,8 @@ int gk20a_mm_fb_flush(struct gk20a *g)
5022 return 0; 5025 return 0;
5023 } 5026 }
5024 5027
5028 nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
5029
5025 mutex_lock(&mm->l2_op_lock); 5030 mutex_lock(&mm->l2_op_lock);
5026 5031
5027 /* Make sure all previous writes are committed to the L2. There's no 5032 /* Make sure all previous writes are committed to the L2. There's no
@@ -5041,15 +5046,12 @@ int gk20a_mm_fb_flush(struct gk20a *g)
5041 flush_fb_flush_pending_v(data) == 5046 flush_fb_flush_pending_v(data) ==
5042 flush_fb_flush_pending_busy_v()) { 5047 flush_fb_flush_pending_busy_v()) {
5043 gk20a_dbg_info("fb_flush 0x%x", data); 5048 gk20a_dbg_info("fb_flush 0x%x", data);
5044 retry--;
5045 udelay(5); 5049 udelay(5);
5046 } else 5050 } else
5047 break; 5051 break;
5048 } while (retry >= 0 || !tegra_platform_is_silicon()); 5052 } while (nvgpu_timeout_check(&timeout));
5049 5053
5050 if (tegra_platform_is_silicon() && retry < 0) { 5054 if (nvgpu_timeout_peek(&timeout)) {
5051 gk20a_warn(dev_from_gk20a(g),
5052 "fb_flush too many retries");
5053 if (g->ops.fb.dump_vpr_wpr_info) 5055 if (g->ops.fb.dump_vpr_wpr_info)
5054 g->ops.fb.dump_vpr_wpr_info(g); 5056 g->ops.fb.dump_vpr_wpr_info(g);
5055 ret = -EBUSY; 5057 ret = -EBUSY;
@@ -5067,10 +5069,12 @@ int gk20a_mm_fb_flush(struct gk20a *g)
5067static void gk20a_mm_l2_invalidate_locked(struct gk20a *g) 5069static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
5068{ 5070{
5069 u32 data; 5071 u32 data;
5070 s32 retry = 200; 5072 struct nvgpu_timeout timeout;
5071 5073
5072 trace_gk20a_mm_l2_invalidate(dev_name(g->dev)); 5074 trace_gk20a_mm_l2_invalidate(dev_name(g->dev));
5073 5075
5076 nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
5077
5074 /* Invalidate any clean lines from the L2 so subsequent reads go to 5078 /* Invalidate any clean lines from the L2 so subsequent reads go to
5075 DRAM. Dirty lines are not affected by this operation. */ 5079 DRAM. Dirty lines are not affected by this operation. */
5076 gk20a_writel(g, flush_l2_system_invalidate_r(), 5080 gk20a_writel(g, flush_l2_system_invalidate_r(),
@@ -5085,13 +5089,12 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
5085 flush_l2_system_invalidate_pending_busy_v()) { 5089 flush_l2_system_invalidate_pending_busy_v()) {
5086 gk20a_dbg_info("l2_system_invalidate 0x%x", 5090 gk20a_dbg_info("l2_system_invalidate 0x%x",
5087 data); 5091 data);
5088 retry--;
5089 udelay(5); 5092 udelay(5);
5090 } else 5093 } else
5091 break; 5094 break;
5092 } while (retry >= 0 || !tegra_platform_is_silicon()); 5095 } while (nvgpu_timeout_check(&timeout));
5093 5096
5094 if (tegra_platform_is_silicon() && retry < 0) 5097 if (nvgpu_timeout_peek(&timeout))
5095 gk20a_warn(dev_from_gk20a(g), 5098 gk20a_warn(dev_from_gk20a(g),
5096 "l2_system_invalidate too many retries"); 5099 "l2_system_invalidate too many retries");
5097 5100
@@ -5114,7 +5117,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
5114{ 5117{
5115 struct mm_gk20a *mm = &g->mm; 5118 struct mm_gk20a *mm = &g->mm;
5116 u32 data; 5119 u32 data;
5117 s32 retry = 2000; 5120 struct nvgpu_timeout timeout;
5118 5121
5119 gk20a_dbg_fn(""); 5122 gk20a_dbg_fn("");
5120 5123
@@ -5122,6 +5125,8 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
5122 if (!g->power_on) 5125 if (!g->power_on)
5123 goto hw_was_off; 5126 goto hw_was_off;
5124 5127
5128 nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
5129
5125 mutex_lock(&mm->l2_op_lock); 5130 mutex_lock(&mm->l2_op_lock);
5126 5131
5127 trace_gk20a_mm_l2_flush(dev_name(g->dev)); 5132 trace_gk20a_mm_l2_flush(dev_name(g->dev));
@@ -5139,15 +5144,11 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
5139 flush_l2_flush_dirty_pending_v(data) == 5144 flush_l2_flush_dirty_pending_v(data) ==
5140 flush_l2_flush_dirty_pending_busy_v()) { 5145 flush_l2_flush_dirty_pending_busy_v()) {
5141 gk20a_dbg_info("l2_flush_dirty 0x%x", data); 5146 gk20a_dbg_info("l2_flush_dirty 0x%x", data);
5142 retry--;
5143 udelay(5); 5147 udelay(5);
5144 } else 5148 } else
5145 break; 5149 break;
5146 } while (retry >= 0 || !tegra_platform_is_silicon()); 5150 } while (nvgpu_timeout_check_msg(&timeout,
5147 5151 "l2_flush_dirty too many retries"));
5148 if (tegra_platform_is_silicon() && retry < 0)
5149 gk20a_warn(dev_from_gk20a(g),
5150 "l2_flush_dirty too many retries");
5151 5152
5152 trace_gk20a_mm_l2_flush_done(dev_name(g->dev)); 5153 trace_gk20a_mm_l2_flush_done(dev_name(g->dev));
5153 5154
@@ -5164,7 +5165,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
5164{ 5165{
5165 struct mm_gk20a *mm = &g->mm; 5166 struct mm_gk20a *mm = &g->mm;
5166 u32 data; 5167 u32 data;
5167 s32 retry = 200; 5168 struct nvgpu_timeout timeout;
5168 5169
5169 gk20a_dbg_fn(""); 5170 gk20a_dbg_fn("");
5170 5171
@@ -5172,6 +5173,8 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
5172 if (!g->power_on) 5173 if (!g->power_on)
5173 goto hw_was_off; 5174 goto hw_was_off;
5174 5175
5176 nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
5177
5175 mutex_lock(&mm->l2_op_lock); 5178 mutex_lock(&mm->l2_op_lock);
5176 5179
5177 /* Flush all dirty lines from the CBC to L2 */ 5180 /* Flush all dirty lines from the CBC to L2 */
@@ -5186,15 +5189,11 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
5186 flush_l2_clean_comptags_pending_v(data) == 5189 flush_l2_clean_comptags_pending_v(data) ==
5187 flush_l2_clean_comptags_pending_busy_v()) { 5190 flush_l2_clean_comptags_pending_busy_v()) {
5188 gk20a_dbg_info("l2_clean_comptags 0x%x", data); 5191 gk20a_dbg_info("l2_clean_comptags 0x%x", data);
5189 retry--;
5190 udelay(5); 5192 udelay(5);
5191 } else 5193 } else
5192 break; 5194 break;
5193 } while (retry >= 0 || !tegra_platform_is_silicon()); 5195 } while (nvgpu_timeout_check_msg(&timeout,
5194 5196 "l2_clean_comptags too many retries"));
5195 if (tegra_platform_is_silicon() && retry < 0)
5196 gk20a_warn(dev_from_gk20a(g),
5197 "l2_clean_comptags too many retries");
5198 5197
5199 mutex_unlock(&mm->l2_op_lock); 5198 mutex_unlock(&mm->l2_op_lock);
5200 5199
@@ -5230,9 +5229,10 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
5230void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm) 5229void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
5231{ 5230{
5232 struct gk20a *g = gk20a_from_vm(vm); 5231 struct gk20a *g = gk20a_from_vm(vm);
5232 struct nvgpu_timeout timeout;
5233 u32 addr_lo; 5233 u32 addr_lo;
5234 u32 data; 5234 u32 data;
5235 s32 retry = 2000; 5235
5236 static DEFINE_MUTEX(tlb_lock); 5236 static DEFINE_MUTEX(tlb_lock);
5237 5237
5238 gk20a_dbg_fn(""); 5238 gk20a_dbg_fn("");
@@ -5252,19 +5252,20 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
5252 5252
5253 trace_gk20a_mm_tlb_invalidate(dev_name(g->dev)); 5253 trace_gk20a_mm_tlb_invalidate(dev_name(g->dev));
5254 5254
5255 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
5256
5255 do { 5257 do {
5256 data = gk20a_readl(g, fb_mmu_ctrl_r()); 5258 data = gk20a_readl(g, fb_mmu_ctrl_r());
5257 if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0) 5259 if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0)
5258 break; 5260 break;
5259 udelay(2); 5261 udelay(2);
5260 retry--; 5262 } while (nvgpu_timeout_check_msg(&timeout,
5261 } while (retry >= 0 || !tegra_platform_is_silicon()); 5263 "wait mmu fifo space"));
5262 5264
5263 if (tegra_platform_is_silicon() && retry < 0) { 5265 if (nvgpu_timeout_peek(&timeout))
5264 gk20a_warn(dev_from_gk20a(g),
5265 "wait mmu fifo space too many retries");
5266 goto out; 5266 goto out;
5267 } 5267
5268 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
5268 5269
5269 gk20a_writel(g, fb_mmu_invalidate_pdb_r(), 5270 gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
5270 fb_mmu_invalidate_pdb_addr_f(addr_lo) | 5271 fb_mmu_invalidate_pdb_addr_f(addr_lo) |
@@ -5281,13 +5282,9 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
5281 if (fb_mmu_ctrl_pri_fifo_empty_v(data) != 5282 if (fb_mmu_ctrl_pri_fifo_empty_v(data) !=
5282 fb_mmu_ctrl_pri_fifo_empty_false_f()) 5283 fb_mmu_ctrl_pri_fifo_empty_false_f())
5283 break; 5284 break;
5284 retry--;
5285 udelay(2); 5285 udelay(2);
5286 } while (retry >= 0 || !tegra_platform_is_silicon()); 5286 } while (nvgpu_timeout_check_msg(&timeout,
5287 5287 "wait mmu invalidate"));
5288 if (tegra_platform_is_silicon() && retry < 0)
5289 gk20a_warn(dev_from_gk20a(g),
5290 "mmu invalidate too many retries");
5291 5288
5292 trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev)); 5289 trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev));
5293 5290