author     Chris Wilson <chris@chris-wilson.co.uk>    2014-04-09 04:19:41 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2014-05-05 03:08:37 -0400
commit     e3efda49e736b8b0de3a5adb45e412cf90fdaf8d (patch)
tree       6085ce56fa5b787fdb2382e81bff520583aae718 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     18393f6322ce523efa767e7ed9bd64fe0645e458 (diff)
drm/i915: Preserve ring buffers objects across resume
Tearing down the ring buffers across resume is overkill, risks unnecessary failure and increases fragmentation.

After failure, since the device is still active, we may end up trying to write into the dangling iomapping and trigger an oops.

v2: stop_ringbuffers() was meant to call stop(ring) not cleanup(ring) during resume!

Reported-by: Jae-hyeon Park <jhyeon@gmail.com>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=72351
References: https://bugs.freedesktop.org/show_bug.cgi?id=76554
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: s/ring->obj == NULL/!intel_ring_initialized(ring)/ as suggested by Oscar.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
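The change boils down to making the allocation paths idempotent: init_status_page() and the new allocate_ring_buffer() return early when the backing object already exists, so a resume-time re-initialisation reuses the buffers, while suspend only quiesces the ring via the new intel_stop_ring_buffer() instead of tearing everything down. The fragment below is a minimal userspace-style sketch of that reuse pattern; the type and function names (ring_buf, ring_buf_alloc, ring_buf_stop, ring_buf_init) are invented for illustration and are not the i915 API.

/* Minimal sketch (invented names) of the pattern this patch introduces:
 * keep the ring object alive across suspend/resume and simply reuse it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring_buf {
	void   *obj;		/* backing storage, preserved across "resume" */
	size_t  size;
	int     running;
};

/* Idempotent allocation: if the object already exists, reuse it. */
static int ring_buf_alloc(struct ring_buf *ring)
{
	if (ring->obj)		/* already allocated, e.g. across resume */
		return 0;

	ring->obj = calloc(1, ring->size);
	return ring->obj ? 0 : -1;	/* -ENOMEM in the kernel version */
}

/* "Suspend": quiesce the ring but keep its storage and mapping. */
static void ring_buf_stop(struct ring_buf *ring)
{
	ring->running = 0;
}

/* "Init/resume": the allocation is a no-op the second time around. */
static int ring_buf_init(struct ring_buf *ring)
{
	int ret = ring_buf_alloc(ring);
	if (ret)
		return ret;
	ring->running = 1;
	return 0;
}

int main(void)
{
	struct ring_buf ring = { .size = 4096 };

	ring_buf_init(&ring);		/* first init: allocates */
	void *first = ring.obj;

	ring_buf_stop(&ring);		/* suspend: object stays allocated */
	ring_buf_init(&ring);		/* resume: the same object is reused */

	printf("reused across resume: %s\n", ring.obj == first ? "yes" : "no");
	free(ring.obj);
	return 0;
}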
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 168
1 file changed, 87 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 890e0986edbe..7d99e84f76a3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1305,45 +1305,39 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *obj;
-	int ret;
 
-	obj = i915_gem_alloc_object(dev, 4096);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate status page\n");
-		ret = -ENOMEM;
-		goto err;
-	}
+	if ((obj = ring->status_page.obj) == NULL) {
+		int ret;
 
-	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-	if (ret)
-		goto err_unref;
+		obj = i915_gem_alloc_object(ring->dev, 4096);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate status page\n");
+			return -ENOMEM;
+		}
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
-	if (ret)
-		goto err_unref;
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+		if (ret)
+			goto err_unref;
+
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+		if (ret) {
+err_unref:
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->status_page.obj = obj;
+	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-	if (ring->status_page.page_addr == NULL) {
-		ret = -ENOMEM;
-		goto err_unpin;
-	}
-	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			 ring->name, ring->status_page.gfx_addr);
 
 	return 0;
-
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-err:
-	return ret;
 }
 
 static int init_phys_status_page(struct intel_ring_buffer *ring)
@@ -1363,44 +1357,23 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
 	return 0;
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
-	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
-	init_waitqueue_head(&ring->irq_queue);
-
-	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(ring);
-		if (ret)
-			return ret;
-	} else {
-		BUG_ON(ring->id != RCS);
-		ret = init_phys_status_page(ring);
-		if (ret)
-			return ret;
-	}
+	if (ring->obj)
+		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
 		obj = i915_gem_object_create_stolen(dev, ring->size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, ring->size);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate ringbuffer\n");
-		ret = -ENOMEM;
-		goto err_hws;
-	}
-
-	ring->obj = obj;
+	if (obj == NULL)
+		return -ENOMEM;
 
 	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
@@ -1414,55 +1387,72 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
-		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ret = ring->init(ring);
-	if (ret)
-		goto err_unmap;
+	ring->obj = obj;
+	return 0;
+
+err_unpin:
+	i915_gem_object_ggtt_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+	return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+	init_waitqueue_head(&ring->irq_queue);
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(ring);
+		if (ret)
+			return ret;
+	} else {
+		BUG_ON(ring->id != RCS);
+		ret = init_phys_status_page(ring);
+		if (ret)
+			return ret;
+	}
+
+	ret = allocate_ring_buffer(ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+		return ret;
+	}
 
 	/* Workaround an erratum on the i830 which causes a hang if
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev) || IS_845G(ring->dev))
+	if (IS_I830(dev) || IS_845G(dev))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	i915_cmd_parser_init_ring(ring);
 
-	return 0;
-
-err_unmap:
-	iounmap(ring->virtual_start);
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	ring->obj = NULL;
-err_hws:
-	cleanup_status_page(ring);
-	return ret;
+	return ring->init(ring);
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv;
-	int ret;
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
 	if (ring->obj == NULL)
 		return;
 
-	/* Disable the ring buffer. The ring must be idle at this point */
-	dev_priv = ring->dev->dev_private;
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
-
-	I915_WRITE_CTL(ring, 0);
+	intel_stop_ring_buffer(ring);
+	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
 	iounmap(ring->virtual_start);
 
@@ -2252,3 +2242,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	stop_ring(ring);
+}