about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
authorMika Kuoppala <mika.kuoppala@linux.intel.com>2014-01-30 12:04:44 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-02-04 05:57:29 -0500
commit939fd762083f988be271da8c96398178daf9baf0 (patch)
tree3e68989a5a49750a15e062617e2a8ade3d938a79 /drivers/gpu/drm/i915/i915_gem.c
parentb6b0fac04de9ae9b1559eddf8e9490f3c9a01885 (diff)
drm/i915: Get rid of acthd based guilty batch search
As we seek the guilty batch using request and hangcheck score, this code is not needed anymore. v2: Rebase. Passing dev_priv instead of getting it from last_ring. Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> Reviewed-by: Ben Widawsky <ben@bwidawsk.net> (v1) Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c97
1 file changed, 6 insertions, 91 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37c2ea45f6db..d230c3bbb4ff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2241,74 +2241,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2241 spin_unlock(&file_priv->mm.lock); 2241 spin_unlock(&file_priv->mm.lock);
2242} 2242}
2243 2243
2244static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, 2244static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2245 struct i915_address_space *vm)
2246{
2247 if (acthd >= i915_gem_obj_offset(obj, vm) &&
2248 acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2249 return true;
2250
2251 return false;
2252}
2253
2254static bool i915_head_inside_request(const u32 acthd_unmasked,
2255 const u32 request_start,
2256 const u32 request_end)
2257{
2258 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2259
2260 if (request_start < request_end) {
2261 if (acthd >= request_start && acthd < request_end)
2262 return true;
2263 } else if (request_start > request_end) {
2264 if (acthd >= request_start || acthd < request_end)
2265 return true;
2266 }
2267
2268 return false;
2269}
2270
2271static struct i915_address_space *
2272request_to_vm(struct drm_i915_gem_request *request)
2273{
2274 struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2275 struct i915_address_space *vm;
2276
2277 if (request->ctx)
2278 vm = request->ctx->vm;
2279 else
2280 vm = &dev_priv->gtt.base;
2281
2282 return vm;
2283}
2284
2285static bool i915_request_guilty(struct drm_i915_gem_request *request,
2286 const u32 acthd, bool *inside)
2287{
2288 /* There is a possibility that unmasked head address
2289 * pointing inside the ring, matches the batch_obj address range.
2290 * However this is extremely unlikely.
2291 */
2292 if (request->batch_obj) {
2293 if (i915_head_inside_object(acthd, request->batch_obj,
2294 request_to_vm(request))) {
2295 *inside = true;
2296 return true;
2297 }
2298 }
2299
2300 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2301 *inside = false;
2302 return true;
2303 }
2304
2305 return false;
2306}
2307
2308static bool i915_context_is_banned(struct drm_device *dev,
2309 const struct i915_hw_context *ctx) 2245 const struct i915_hw_context *ctx)
2310{ 2246{
2311 struct drm_i915_private *dev_priv = to_i915(dev);
2312 unsigned long elapsed; 2247 unsigned long elapsed;
2313 2248
2314 elapsed = get_seconds() - ctx->hang_stats.guilty_ts; 2249 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
@@ -2330,39 +2265,19 @@ static bool i915_context_is_banned(struct drm_device *dev,
2330 return false; 2265 return false;
2331} 2266}
2332 2267
2333static void i915_set_reset_status(struct intel_ring_buffer *ring, 2268static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2334 struct drm_i915_gem_request *request, 2269 struct i915_hw_context *ctx,
2335 const bool guilty) 2270 const bool guilty)
2336{ 2271{
2337 const u32 acthd = intel_ring_get_active_head(ring);
2338 bool inside;
2339 unsigned long offset = 0;
2340 struct i915_hw_context *ctx = request->ctx;
2341 struct i915_ctx_hang_stats *hs; 2272 struct i915_ctx_hang_stats *hs;
2342 2273
2343 if (WARN_ON(!ctx)) 2274 if (WARN_ON(!ctx))
2344 return; 2275 return;
2345 2276
2346 if (request->batch_obj)
2347 offset = i915_gem_obj_offset(request->batch_obj,
2348 request_to_vm(request));
2349
2350 if (guilty &&
2351 i915_request_guilty(request, acthd, &inside)) {
2352 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2353 ring->name,
2354 inside ? "inside" : "flushing",
2355 offset,
2356 ctx->id,
2357 acthd);
2358 }
2359
2360 WARN_ON(!ctx->last_ring);
2361
2362 hs = &ctx->hang_stats; 2277 hs = &ctx->hang_stats;
2363 2278
2364 if (guilty) { 2279 if (guilty) {
2365 hs->banned = i915_context_is_banned(ring->dev, ctx); 2280 hs->banned = i915_context_is_banned(dev_priv, ctx);
2366 hs->batch_active++; 2281 hs->batch_active++;
2367 hs->guilty_ts = get_seconds(); 2282 hs->guilty_ts = get_seconds();
2368 } else { 2283 } else {
@@ -2410,10 +2325,10 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2410 2325
2411 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 2326 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2412 2327
2413 i915_set_reset_status(ring, request, ring_hung); 2328 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2414 2329
2415 list_for_each_entry_continue(request, &ring->request_list, list) 2330 list_for_each_entry_continue(request, &ring->request_list, list)
2416 i915_set_reset_status(ring, request, false); 2331 i915_set_reset_status(dev_priv, request->ctx, false);
2417} 2332}
2418 2333
2419static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2334static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,