-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  20
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 168
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |   1
3 files changed, 105 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c8b86196c84..f766d5f94d93 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2384,6 +2384,11 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
 		i915_gem_free_request(request);
 	}
+
+	/* These may not have been flushed before the reset, do so now */
+	kfree(ring->preallocated_lazy_request);
+	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_seqno = 0;
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2424,8 +2429,6 @@ void i915_gem_reset(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
-	i915_gem_cleanup_ringbuffer(dev);
-
 	i915_gem_context_reset(dev);
 
 	i915_gem_restore_fences(dev);
@@ -4233,6 +4236,17 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 	kfree(vma);
 }
 
+static void
+i915_gem_stop_ringbuffers(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		intel_stop_ring_buffer(ring);
+}
+
 int
 i915_gem_suspend(struct drm_device *dev)
 {
@@ -4254,7 +4268,7 @@ i915_gem_suspend(struct drm_device *dev)
 	i915_gem_evict_everything(dev);
 
 	i915_kernel_lost_context(dev);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_stop_ringbuffers(dev);
 
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
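The i915_gem.c hunks above convert both the reset path and the suspend path from full ringbuffer teardown (i915_gem_cleanup_ringbuffer) to a stop-only helper, so the rings' backing objects survive until actual driver unload. A minimal standalone sketch of that stop-versus-cleanup split, in plain C with made-up names rather than the i915 API:

	#include <stdio.h>
	#include <stdlib.h>

	struct ring {
		void *obj;	/* backing buffer, NULL until allocated */
		int running;
	};

	static int ring_alloc(struct ring *ring, size_t size)
	{
		if (ring->obj)	/* already allocated: reused across resume */
			return 0;
		ring->obj = malloc(size);
		return ring->obj ? 0 : -1;
	}

	static void ring_stop(struct ring *ring)	/* suspend/reset: quiesce only */
	{
		ring->running = 0;
	}

	static void ring_cleanup(struct ring *ring)	/* unload: full teardown */
	{
		ring_stop(ring);
		free(ring->obj);
		ring->obj = NULL;
	}

	int main(void)
	{
		struct ring ring = {0};

		ring_alloc(&ring, 4096);	/* first init allocates */
		ring_stop(&ring);		/* suspend keeps ring.obj */
		ring_alloc(&ring, 4096);	/* resume: no-op, object kept */
		printf("obj preserved across suspend: %s\n",
		       ring.obj ? "yes" : "no");
		ring_cleanup(&ring);		/* only unload frees it */
		return 0;
	}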
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 890e0986edbe..7d99e84f76a3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1305,45 +1305,39 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *obj;
-	int ret;
 
-	obj = i915_gem_alloc_object(dev, 4096);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate status page\n");
-		ret = -ENOMEM;
-		goto err;
-	}
+	if ((obj = ring->status_page.obj) == NULL) {
+		int ret;
 
-	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-	if (ret)
-		goto err_unref;
+		obj = i915_gem_alloc_object(ring->dev, 4096);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate status page\n");
+			return -ENOMEM;
+		}
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
-	if (ret)
-		goto err_unref;
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+		if (ret)
+			goto err_unref;
+
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+		if (ret) {
+err_unref:
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->status_page.obj = obj;
+	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-	if (ring->status_page.page_addr == NULL) {
-		ret = -ENOMEM;
-		goto err_unpin;
-	}
-	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			 ring->name, ring->status_page.gfx_addr);
 
 	return 0;
-
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-err:
-	return ret;
 }
 
 static int init_phys_status_page(struct intel_ring_buffer *ring)
@@ -1363,44 +1357,23 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
 	return 0;
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
-	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
-	init_waitqueue_head(&ring->irq_queue);
-
-	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(ring);
-		if (ret)
-			return ret;
-	} else {
-		BUG_ON(ring->id != RCS);
-		ret = init_phys_status_page(ring);
-		if (ret)
-			return ret;
-	}
+	if (ring->obj)
+		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
 		obj = i915_gem_object_create_stolen(dev, ring->size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, ring->size);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate ringbuffer\n");
-		ret = -ENOMEM;
-		goto err_hws;
-	}
-
-	ring->obj = obj;
+	if (obj == NULL)
+		return -ENOMEM;
 
 	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
@@ -1414,55 +1387,72 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
-		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ret = ring->init(ring);
-	if (ret)
-		goto err_unmap;
+	ring->obj = obj;
+	return 0;
+
+err_unpin:
+	i915_gem_object_ggtt_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+	return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+	init_waitqueue_head(&ring->irq_queue);
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(ring);
+		if (ret)
+			return ret;
+	} else {
+		BUG_ON(ring->id != RCS);
+		ret = init_phys_status_page(ring);
+		if (ret)
+			return ret;
+	}
+
+	ret = allocate_ring_buffer(ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+		return ret;
+	}
 
 	/* Workaround an erratum on the i830 which causes a hang if
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev) || IS_845G(ring->dev))
+	if (IS_I830(dev) || IS_845G(dev))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	i915_cmd_parser_init_ring(ring);
 
-	return 0;
-
-err_unmap:
-	iounmap(ring->virtual_start);
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	ring->obj = NULL;
-err_hws:
-	cleanup_status_page(ring);
-	return ret;
+	return ring->init(ring);
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv;
-	int ret;
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
 	if (ring->obj == NULL)
 		return;
 
-	/* Disable the ring buffer. The ring must be idle at this point */
-	dev_priv = ring->dev->dev_private;
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
-
-	I915_WRITE_CTL(ring, 0);
+	intel_stop_ring_buffer(ring);
+	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
 	iounmap(ring->virtual_start);
 
@@ -2252,3 +2242,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	stop_ring(ring);
+}
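The new intel_stop_ring_buffer() above idles the ring before issuing the hardware stop, and stays quiet about a failed quiesce when a GPU reset is already in flight (the ring cannot idle then, and the reset will reinitialize it anyway). A rough standalone model of that ordering, with illustrative names only, not i915 code:

	#include <stdbool.h>
	#include <stdio.h>

	struct ring { bool initialized; int pending; bool running; };

	static int ring_idle(struct ring *r)	/* wait out outstanding work */
	{
		r->pending = 0;			/* pretend the GPU drained */
		return 0;			/* nonzero would mean a timeout */
	}

	static void ring_stop_hw(struct ring *r)	/* the actual hardware stop */
	{
		r->running = false;
	}

	static void ring_stop(struct ring *r, bool reset_in_progress)
	{
		if (!r->initialized)	/* tolerate never-initialized rings */
			return;

		/* Idle first so no requests are lost; a failure is only worth
		 * shouting about if the GPU is not already being reset. */
		if (ring_idle(r) && !reset_in_progress)
			fprintf(stderr, "failed to quiesce ring\n");

		ring_stop_hw(r);
	}

	int main(void)
	{
		struct ring r = { .initialized = true, .pending = 2, .running = true };

		ring_stop(&r, false);
		printf("running=%d pending=%d\n", r.running, r.pending);
		return 0;
	}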
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 413cdc74ed53..54839165eb6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -263,6 +263,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
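With the header change, callers get a two-level API: intel_stop_ring_buffer() quiesces (suspend, reset) while intel_cleanup_ring_buffer() stops, unmaps and frees (unload), now asserting via WARN_ON that the stop actually left the hardware idle. A toy version of that stop-then-assert-idle check; the register read and the bit position here are stand-ins, not the real i915 register interface:

	#include <stdint.h>
	#include <stdio.h>

	#define MODE_IDLE (1u << 9)	/* assumed idle bit in the ring MODE register */

	static uint32_t read_mode_reg(void)
	{
		return MODE_IDLE;	/* a stopped ring should report idle */
	}

	int main(void)
	{
		/* After the stop, cleanup may assume the hardware is idle; the
		 * check makes a violated assumption loud instead of silently
		 * unmapping and freeing a still-live ring. */
		if ((read_mode_reg() & MODE_IDLE) == 0)
			fprintf(stderr, "WARN: ring still busy at cleanup\n");
		else
			puts("ring idle, safe to unmap and free");
		return 0;
	}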