Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 528
1 file changed, 274 insertions, 254 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 21d0dea57312..8f50919ba9b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) | |||
128 | 128 | ||
129 | int i915_mutex_lock_interruptible(struct drm_device *dev) | 129 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
130 | { | 130 | { |
131 | struct drm_i915_private *dev_priv = dev->dev_private; | 131 | struct drm_i915_private *dev_priv = to_i915(dev); |
132 | int ret; | 132 | int ret; |
133 | 133 | ||
134 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); | 134 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
@@ -377,13 +377,13 @@ out: | |||
377 | 377 | ||
378 | void *i915_gem_object_alloc(struct drm_device *dev) | 378 | void *i915_gem_object_alloc(struct drm_device *dev) |
379 | { | 379 | { |
380 | struct drm_i915_private *dev_priv = dev->dev_private; | 380 | struct drm_i915_private *dev_priv = to_i915(dev); |
381 | return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); | 381 | return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); |
382 | } | 382 | } |
383 | 383 | ||
384 | void i915_gem_object_free(struct drm_i915_gem_object *obj) | 384 | void i915_gem_object_free(struct drm_i915_gem_object *obj) |
385 | { | 385 | { |
386 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 386 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
387 | kmem_cache_free(dev_priv->objects, obj); | 387 | kmem_cache_free(dev_priv->objects, obj); |
388 | } | 388 | } |
389 | 389 | ||
@@ -508,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, | |||
508 | 508 | ||
509 | *needs_clflush = 0; | 509 | *needs_clflush = 0; |
510 | 510 | ||
511 | if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) | 511 | if (WARN_ON(!i915_gem_object_has_struct_page(obj))) |
512 | return -EINVAL; | 512 | return -EINVAL; |
513 | 513 | ||
514 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { | 514 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { |
@@ -636,7 +636,7 @@ i915_gem_gtt_pread(struct drm_device *dev, | |||
636 | struct drm_i915_gem_object *obj, uint64_t size, | 636 | struct drm_i915_gem_object *obj, uint64_t size, |
637 | uint64_t data_offset, uint64_t data_ptr) | 637 | uint64_t data_offset, uint64_t data_ptr) |
638 | { | 638 | { |
639 | struct drm_i915_private *dev_priv = dev->dev_private; | 639 | struct drm_i915_private *dev_priv = to_i915(dev); |
640 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 640 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
641 | struct drm_mm_node node; | 641 | struct drm_mm_node node; |
642 | char __user *user_data; | 642 | char __user *user_data; |
@@ -760,7 +760,7 @@ i915_gem_shmem_pread(struct drm_device *dev, | |||
760 | int needs_clflush = 0; | 760 | int needs_clflush = 0; |
761 | struct sg_page_iter sg_iter; | 761 | struct sg_page_iter sg_iter; |
762 | 762 | ||
763 | if (!obj->base.filp) | 763 | if (!i915_gem_object_has_struct_page(obj)) |
764 | return -ENODEV; | 764 | return -ENODEV; |
765 | 765 | ||
766 | user_data = u64_to_user_ptr(args->data_ptr); | 766 | user_data = u64_to_user_ptr(args->data_ptr); |
@@ -1250,7 +1250,7 @@ int | |||
1250 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 1250 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
1251 | struct drm_file *file) | 1251 | struct drm_file *file) |
1252 | { | 1252 | { |
1253 | struct drm_i915_private *dev_priv = dev->dev_private; | 1253 | struct drm_i915_private *dev_priv = to_i915(dev); |
1254 | struct drm_i915_gem_pwrite *args = data; | 1254 | struct drm_i915_gem_pwrite *args = data; |
1255 | struct drm_i915_gem_object *obj; | 1255 | struct drm_i915_gem_object *obj; |
1256 | int ret; | 1256 | int ret; |
@@ -1298,7 +1298,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1298 | * pread/pwrite currently are reading and writing from the CPU | 1298 | * pread/pwrite currently are reading and writing from the CPU |
1299 | * perspective, requiring manual detiling by the client. | 1299 | * perspective, requiring manual detiling by the client. |
1300 | */ | 1300 | */ |
1301 | if (!obj->base.filp || cpu_write_needs_clflush(obj)) { | 1301 | if (!i915_gem_object_has_struct_page(obj) || |
1302 | cpu_write_needs_clflush(obj)) { | ||
1302 | ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); | 1303 | ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); |
1303 | /* Note that the gtt paths might fail with non-page-backed user | 1304 | /* Note that the gtt paths might fail with non-page-backed user |
1304 | * pointers (e.g. gtt mappings when moving data between | 1305 | * pointers (e.g. gtt mappings when moving data between |
@@ -1308,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1308 | if (ret == -EFAULT) { | 1309 | if (ret == -EFAULT) { |
1309 | if (obj->phys_handle) | 1310 | if (obj->phys_handle) |
1310 | ret = i915_gem_phys_pwrite(obj, args, file); | 1311 | ret = i915_gem_phys_pwrite(obj, args, file); |
1311 | else if (obj->base.filp) | 1312 | else if (i915_gem_object_has_struct_page(obj)) |
1312 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | 1313 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); |
1313 | else | 1314 | else |
1314 | ret = -ENODEV; | 1315 | ret = -ENODEV; |
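
The pwrite path now keys both the fast path and the shmem fallback on i915_gem_object_has_struct_page() rather than peeking at obj->base.filp. The fallback order itself is unchanged: try the GTT write first, and only on -EFAULT fall back to the phys or shmem writers, else -ENODEV. A condensed sketch of that decision chain, with the object state reduced to hypothetical booleans and the writers stubbed out:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical reduction of the object state the ioctl consults. */
struct object {
        bool has_struct_page;   /* i915_gem_object_has_struct_page() */
        bool has_phys_handle;   /* obj->phys_handle != NULL */
        bool needs_clflush;     /* cpu_write_needs_clflush() */
};

static int gtt_pwrite_fast(struct object *obj) { return -EFAULT; } /* may fail */
static int phys_pwrite(struct object *obj)     { return 0; }
static int shmem_pwrite(struct object *obj)    { return 0; }

static int do_pwrite(struct object *obj)
{
        int ret = -EFAULT;

        /* Fast GTT path for objects without struct pages, or whose CPU
         * writes would need a clflush anyway.
         */
        if (!obj->has_struct_page || obj->needs_clflush)
                ret = gtt_pwrite_fast(obj);

        if (ret == -EFAULT) {
                if (obj->has_phys_handle)
                        ret = phys_pwrite(obj);
                else if (obj->has_struct_page)
                        ret = shmem_pwrite(obj);
                else
                        ret = -ENODEV;
        }

        return ret;
}
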
@@ -1342,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible) | |||
1342 | return 0; | 1343 | return 0; |
1343 | } | 1344 | } |
1344 | 1345 | ||
1345 | static void fake_irq(unsigned long data) | ||
1346 | { | ||
1347 | wake_up_process((struct task_struct *)data); | ||
1348 | } | ||
1349 | |||
1350 | static bool missed_irq(struct drm_i915_private *dev_priv, | ||
1351 | struct intel_engine_cs *engine) | ||
1352 | { | ||
1353 | return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings); | ||
1354 | } | ||
1355 | |||
1356 | static unsigned long local_clock_us(unsigned *cpu) | 1346 | static unsigned long local_clock_us(unsigned *cpu) |
1357 | { | 1347 | { |
1358 | unsigned long t; | 1348 | unsigned long t; |
@@ -1385,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu) | |||
1385 | return this_cpu != cpu; | 1375 | return this_cpu != cpu; |
1386 | } | 1376 | } |
1387 | 1377 | ||
1388 | static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | 1378 | bool __i915_spin_request(const struct drm_i915_gem_request *req, |
1379 | int state, unsigned long timeout_us) | ||
1389 | { | 1380 | { |
1390 | unsigned long timeout; | ||
1391 | unsigned cpu; | 1381 | unsigned cpu; |
1392 | 1382 | ||
1393 | /* When waiting for high frequency requests, e.g. during synchronous | 1383 | /* When waiting for high frequency requests, e.g. during synchronous |
@@ -1400,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | |||
1400 | * takes to sleep on a request, on the order of a microsecond. | 1390 | * takes to sleep on a request, on the order of a microsecond. |
1401 | */ | 1391 | */ |
1402 | 1392 | ||
1403 | if (req->engine->irq_refcount) | 1393 | timeout_us += local_clock_us(&cpu); |
1404 | return -EBUSY; | 1394 | do { |
1405 | 1395 | if (i915_gem_request_completed(req)) | |
1406 | /* Only spin if we know the GPU is processing this request */ | 1396 | return true; |
1407 | if (!i915_gem_request_started(req, true)) | ||
1408 | return -EAGAIN; | ||
1409 | |||
1410 | timeout = local_clock_us(&cpu) + 5; | ||
1411 | while (!need_resched()) { | ||
1412 | if (i915_gem_request_completed(req, true)) | ||
1413 | return 0; | ||
1414 | 1397 | ||
1415 | if (signal_pending_state(state, current)) | 1398 | if (signal_pending_state(state, current)) |
1416 | break; | 1399 | break; |
1417 | 1400 | ||
1418 | if (busywait_stop(timeout, cpu)) | 1401 | if (busywait_stop(timeout_us, cpu)) |
1419 | break; | 1402 | break; |
1420 | 1403 | ||
1421 | cpu_relax_lowlatency(); | 1404 | cpu_relax_lowlatency(); |
1422 | } | 1405 | } while (!need_resched()); |
1423 | 1406 | ||
1424 | if (i915_gem_request_completed(req, false)) | 1407 | return false; |
1425 | return 0; | ||
1426 | |||
1427 | return -EAGAIN; | ||
1428 | } | 1408 | } |
1429 | 1409 | ||
1430 | /** | 1410 | /** |
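
The rewritten __i915_spin_request() above bounds the optimistic busy-wait by elapsed time (timeout_us on top of local_clock_us()) instead of returning -EBUSY/-EAGAIN sentinels, leaving the caller to decide when spinning is worthwhile. A minimal userspace sketch of the same time-bounded poll, assuming a plain C11 atomic flag in place of the hardware seqno (all names here are illustrative, not driver API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical stand-in for i915_gem_request_completed(). */
static bool request_completed(atomic_bool *done)
{
        return atomic_load_explicit(done, memory_order_acquire);
}

static uint64_t clock_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

/*
 * Poll for at most timeout_us before the caller falls back to sleeping.
 * The in-kernel loop additionally stops on need_resched(), on a pending
 * signal, and when migrated to another CPU; this sketch only honours the
 * time bound.
 */
static bool spin_request(atomic_bool *done, uint64_t timeout_us)
{
        uint64_t expire = clock_us() + timeout_us;

        while (clock_us() < expire) {
                if (request_completed(done))
                        return true;
        }

        return false;
}
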
@@ -1449,25 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1449 | s64 *timeout, | 1429 | s64 *timeout, |
1450 | struct intel_rps_client *rps) | 1430 | struct intel_rps_client *rps) |
1451 | { | 1431 | { |
1452 | struct intel_engine_cs *engine = i915_gem_request_get_engine(req); | ||
1453 | struct drm_i915_private *dev_priv = req->i915; | ||
1454 | const bool irq_test_in_progress = | ||
1455 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); | ||
1456 | int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; | 1432 | int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; |
1457 | DEFINE_WAIT(wait); | 1433 | DEFINE_WAIT(reset); |
1458 | unsigned long timeout_expire; | 1434 | struct intel_wait wait; |
1435 | unsigned long timeout_remain; | ||
1459 | s64 before = 0; /* Only to silence a compiler warning. */ | 1436 | s64 before = 0; /* Only to silence a compiler warning. */ |
1460 | int ret; | 1437 | int ret = 0; |
1461 | 1438 | ||
1462 | WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); | 1439 | might_sleep(); |
1463 | 1440 | ||
1464 | if (list_empty(&req->list)) | 1441 | if (list_empty(&req->list)) |
1465 | return 0; | 1442 | return 0; |
1466 | 1443 | ||
1467 | if (i915_gem_request_completed(req, true)) | 1444 | if (i915_gem_request_completed(req)) |
1468 | return 0; | 1445 | return 0; |
1469 | 1446 | ||
1470 | timeout_expire = 0; | 1447 | timeout_remain = MAX_SCHEDULE_TIMEOUT; |
1471 | if (timeout) { | 1448 | if (timeout) { |
1472 | if (WARN_ON(*timeout < 0)) | 1449 | if (WARN_ON(*timeout < 0)) |
1473 | return -EINVAL; | 1450 | return -EINVAL; |
@@ -1475,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1475 | if (*timeout == 0) | 1452 | if (*timeout == 0) |
1476 | return -ETIME; | 1453 | return -ETIME; |
1477 | 1454 | ||
1478 | timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); | 1455 | timeout_remain = nsecs_to_jiffies_timeout(*timeout); |
1479 | 1456 | ||
1480 | /* | 1457 | /* |
1481 | * Record current time in case interrupted by signal, or wedged. | 1458 | * Record current time in case interrupted by signal, or wedged. |
@@ -1483,75 +1460,85 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1483 | before = ktime_get_raw_ns(); | 1460 | before = ktime_get_raw_ns(); |
1484 | } | 1461 | } |
1485 | 1462 | ||
1486 | if (INTEL_INFO(dev_priv)->gen >= 6) | ||
1487 | gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); | ||
1488 | |||
1489 | trace_i915_gem_request_wait_begin(req); | 1463 | trace_i915_gem_request_wait_begin(req); |
1490 | 1464 | ||
1491 | /* Optimistic spin for the next jiffie before touching IRQs */ | 1465 | /* This client is about to stall waiting for the GPU. In many cases |
1492 | ret = __i915_spin_request(req, state); | 1466 | * this is undesirable and limits the throughput of the system, as |
1493 | if (ret == 0) | 1467 | * many clients cannot continue processing user input/output whilst |
1494 | goto out; | 1468 | * blocked. RPS autotuning may take tens of milliseconds to respond |
1495 | 1469 | * to the GPU load and thus incurs additional latency for the client. | |
1496 | if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) { | 1470 | * We can circumvent that by promoting the GPU frequency to maximum |
1497 | ret = -ENODEV; | 1471 | * before we wait. This makes the GPU throttle up much more quickly |
1498 | goto out; | 1472 | * (good for benchmarks and user experience, e.g. window animations), |
1499 | } | 1473 | * but at a cost of spending more power processing the workload |
1474 | * (bad for battery). Not all clients even want their results | ||
1475 | * immediately and for them we should just let the GPU select its own | ||
1476 | * frequency to maximise efficiency. To prevent a single client from | ||
1477 | * forcing the clocks too high for the whole system, we only allow | ||
1478 | * each client to waitboost once in a busy period. | ||
1479 | */ | ||
1480 | if (INTEL_INFO(req->i915)->gen >= 6) | ||
1481 | gen6_rps_boost(req->i915, rps, req->emitted_jiffies); | ||
1500 | 1482 | ||
1501 | for (;;) { | 1483 | /* Optimistic spin for the next ~jiffie before touching IRQs */ |
1502 | struct timer_list timer; | 1484 | if (i915_spin_request(req, state, 5)) |
1485 | goto complete; | ||
1503 | 1486 | ||
1504 | prepare_to_wait(&engine->irq_queue, &wait, state); | 1487 | set_current_state(state); |
1488 | add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); | ||
1505 | 1489 | ||
1506 | /* We need to check whether any gpu reset happened in between | 1490 | intel_wait_init(&wait, req->seqno); |
1507 | * the request being submitted and now. If a reset has occurred, | 1491 | if (intel_engine_add_wait(req->engine, &wait)) |
1508 | * the request is effectively complete (we either are in the | 1492 | /* In order to check that we haven't missed the interrupt |
1509 | * process of or have discarded the rendering and completely | 1493 | * as we enabled it, we need to kick ourselves to do a |
1510 | * reset the GPU. The results of the request are lost and we | 1494 | * coherent check on the seqno before we sleep. |
1511 | * are free to continue on with the original operation. | ||
1512 | */ | 1495 | */ |
1513 | if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { | 1496 | goto wakeup; |
1514 | ret = 0; | ||
1515 | break; | ||
1516 | } | ||
1517 | |||
1518 | if (i915_gem_request_completed(req, false)) { | ||
1519 | ret = 0; | ||
1520 | break; | ||
1521 | } | ||
1522 | 1497 | ||
1498 | for (;;) { | ||
1523 | if (signal_pending_state(state, current)) { | 1499 | if (signal_pending_state(state, current)) { |
1524 | ret = -ERESTARTSYS; | 1500 | ret = -ERESTARTSYS; |
1525 | break; | 1501 | break; |
1526 | } | 1502 | } |
1527 | 1503 | ||
1528 | if (timeout && time_after_eq(jiffies, timeout_expire)) { | 1504 | /* Ensure that even if the GPU hangs, we get woken up. |
1505 | * | ||
1506 | * However, note that if no one is waiting, we never notice | ||
1507 | * a gpu hang. Eventually, we will have to wait for a resource | ||
1508 | * held by the GPU and so trigger a hangcheck. In the most | ||
1509 | * pathological case, this will be upon memory starvation! | ||
1510 | */ | ||
1511 | i915_queue_hangcheck(req->i915); | ||
1512 | |||
1513 | timeout_remain = io_schedule_timeout(timeout_remain); | ||
1514 | if (timeout_remain == 0) { | ||
1529 | ret = -ETIME; | 1515 | ret = -ETIME; |
1530 | break; | 1516 | break; |
1531 | } | 1517 | } |
1532 | 1518 | ||
1533 | timer.function = NULL; | 1519 | if (intel_wait_complete(&wait)) |
1534 | if (timeout || missed_irq(dev_priv, engine)) { | 1520 | break; |
1535 | unsigned long expire; | ||
1536 | 1521 | ||
1537 | setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); | 1522 | set_current_state(state); |
1538 | expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire; | ||
1539 | mod_timer(&timer, expire); | ||
1540 | } | ||
1541 | 1523 | ||
1542 | io_schedule(); | 1524 | wakeup: |
1525 | /* Carefully check if the request is complete, giving time | ||
1526 | * for the seqno to be visible following the interrupt. | ||
1527 | * We also have to check in case we are kicked by the GPU | ||
1528 | * reset in order to drop the struct_mutex. | ||
1529 | */ | ||
1530 | if (__i915_request_irq_complete(req)) | ||
1531 | break; | ||
1543 | 1532 | ||
1544 | if (timer.function) { | 1533 | /* Only spin if we know the GPU is processing this request */ |
1545 | del_singleshot_timer_sync(&timer); | 1534 | if (i915_spin_request(req, state, 2)) |
1546 | destroy_timer_on_stack(&timer); | 1535 | break; |
1547 | } | ||
1548 | } | 1536 | } |
1549 | if (!irq_test_in_progress) | 1537 | remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset); |
1550 | engine->irq_put(engine); | ||
1551 | |||
1552 | finish_wait(&engine->irq_queue, &wait); | ||
1553 | 1538 | ||
1554 | out: | 1539 | intel_engine_remove_wait(req->engine, &wait); |
1540 | __set_current_state(TASK_RUNNING); | ||
1541 | complete: | ||
1555 | trace_i915_gem_request_wait_end(req); | 1542 | trace_i915_gem_request_wait_end(req); |
1556 | 1543 | ||
1557 | if (timeout) { | 1544 | if (timeout) { |
@@ -1570,6 +1557,22 @@ out: | |||
1570 | *timeout = 0; | 1557 | *timeout = 0; |
1571 | } | 1558 | } |
1572 | 1559 | ||
1560 | if (rps && req->seqno == req->engine->last_submitted_seqno) { | ||
1561 | /* The GPU is now idle and this client has stalled. | ||
1562 | * Since no other client has submitted a request in the | ||
1563 | * meantime, assume that this client is the only one | ||
1564 | * supplying work to the GPU but is unable to keep that | ||
1565 | * work supplied because it is waiting. Since the GPU is | ||
1566 | * then never kept fully busy, RPS autoclocking will | ||
1567 | * keep the clocks relatively low, causing further delays. | ||
1568 | * Compensate by giving the synchronous client credit for | ||
1569 | * a waitboost next time. | ||
1570 | */ | ||
1571 | spin_lock(&req->i915->rps.client_lock); | ||
1572 | list_del_init(&rps->link); | ||
1573 | spin_unlock(&req->i915->rps.client_lock); | ||
1574 | } | ||
1575 | |||
1573 | return ret; | 1576 | return ret; |
1574 | } | 1577 | } |
1575 | 1578 | ||
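
The rewritten wait above drops the per-engine irq_queue and fake_irq timer in favour of the intel_wait/breadcrumbs machinery: a short optimistic spin first, then an interruptible, timeout-bounded sleep that re-validates the seqno on every wakeup. The broad shape of that two-stage wait can be sketched in userspace with a pthread condvar standing in for the interrupt-driven breadcrumb; the names, spin bound and locking below are illustrative, not the driver's:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical request: a completion flag guarded by a mutex/condvar. */
struct request {
        pthread_mutex_t lock;
        pthread_cond_t  wake;
        bool            done;
};

/* The completing side sets the flag and wakes any sleepers. */
static void complete_request(struct request *req)
{
        pthread_mutex_lock(&req->lock);
        __atomic_store_n(&req->done, true, __ATOMIC_RELEASE);
        pthread_cond_broadcast(&req->wake);
        pthread_mutex_unlock(&req->lock);
}

/*
 * Two-stage wait: briefly spin (cheap for nearly-complete requests), then
 * block with an absolute CLOCK_REALTIME deadline, re-checking the predicate
 * on every wakeup, spurious or not. The driver bounds its spin by ~5us of
 * wall clock; an iteration count is used here for simplicity.
 */
static int wait_request(struct request *req, const struct timespec *deadline)
{
        for (int i = 0; i < 1000; i++)
                if (__atomic_load_n(&req->done, __ATOMIC_ACQUIRE))
                        return 0;

        pthread_mutex_lock(&req->lock);
        while (!req->done) {
                int err = pthread_cond_timedwait(&req->wake, &req->lock, deadline);
                if (err == ETIMEDOUT) {
                        pthread_mutex_unlock(&req->lock);
                        return -ETIME;
                }
        }
        pthread_mutex_unlock(&req->lock);
        return 0;
}
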
@@ -1648,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) | |||
1648 | struct intel_engine_cs *engine = req->engine; | 1651 | struct intel_engine_cs *engine = req->engine; |
1649 | struct drm_i915_gem_request *tmp; | 1652 | struct drm_i915_gem_request *tmp; |
1650 | 1653 | ||
1651 | lockdep_assert_held(&engine->i915->dev->struct_mutex); | 1654 | lockdep_assert_held(&engine->i915->drm.struct_mutex); |
1652 | 1655 | ||
1653 | if (list_empty(&req->list)) | 1656 | if (list_empty(&req->list)) |
1654 | return; | 1657 | return; |
@@ -1677,14 +1680,14 @@ i915_wait_request(struct drm_i915_gem_request *req) | |||
1677 | 1680 | ||
1678 | interruptible = dev_priv->mm.interruptible; | 1681 | interruptible = dev_priv->mm.interruptible; |
1679 | 1682 | ||
1680 | BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | 1683 | BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex)); |
1681 | 1684 | ||
1682 | ret = __i915_wait_request(req, interruptible, NULL, NULL); | 1685 | ret = __i915_wait_request(req, interruptible, NULL, NULL); |
1683 | if (ret) | 1686 | if (ret) |
1684 | return ret; | 1687 | return ret; |
1685 | 1688 | ||
1686 | /* If the GPU hung, we want to keep the requests to find the guilty. */ | 1689 | /* If the GPU hung, we want to keep the requests to find the guilty. */ |
1687 | if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) | 1690 | if (!i915_reset_in_progress(&dev_priv->gpu_error)) |
1688 | __i915_gem_request_retire__upto(req); | 1691 | __i915_gem_request_retire__upto(req); |
1689 | 1692 | ||
1690 | return 0; | 1693 | return 0; |
@@ -1745,7 +1748,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, | |||
1745 | else if (obj->last_write_req == req) | 1748 | else if (obj->last_write_req == req) |
1746 | i915_gem_object_retire__write(obj); | 1749 | i915_gem_object_retire__write(obj); |
1747 | 1750 | ||
1748 | if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) | 1751 | if (!i915_reset_in_progress(&req->i915->gpu_error)) |
1749 | __i915_gem_request_retire__upto(req); | 1752 | __i915_gem_request_retire__upto(req); |
1750 | } | 1753 | } |
1751 | 1754 | ||
@@ -1758,7 +1761,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1758 | bool readonly) | 1761 | bool readonly) |
1759 | { | 1762 | { |
1760 | struct drm_device *dev = obj->base.dev; | 1763 | struct drm_device *dev = obj->base.dev; |
1761 | struct drm_i915_private *dev_priv = dev->dev_private; | 1764 | struct drm_i915_private *dev_priv = to_i915(dev); |
1762 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; | 1765 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; |
1763 | int ret, i, n = 0; | 1766 | int ret, i, n = 0; |
1764 | 1767 | ||
@@ -1809,6 +1812,13 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file) | |||
1809 | return &fpriv->rps; | 1812 | return &fpriv->rps; |
1810 | } | 1813 | } |
1811 | 1814 | ||
1815 | static enum fb_op_origin | ||
1816 | write_origin(struct drm_i915_gem_object *obj, unsigned domain) | ||
1817 | { | ||
1818 | return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ? | ||
1819 | ORIGIN_GTT : ORIGIN_CPU; | ||
1820 | } | ||
1821 | |||
1812 | /** | 1822 | /** |
1813 | * Called when user space prepares to use an object with the CPU, either | 1823 | * Called when user space prepares to use an object with the CPU, either |
1814 | * through the mmap ioctl's mapping or a GTT mapping. | 1824 | * through the mmap ioctl's mapping or a GTT mapping. |
@@ -1865,9 +1875,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1865 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1875 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
1866 | 1876 | ||
1867 | if (write_domain != 0) | 1877 | if (write_domain != 0) |
1868 | intel_fb_obj_invalidate(obj, | 1878 | intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); |
1869 | write_domain == I915_GEM_DOMAIN_GTT ? | ||
1870 | ORIGIN_GTT : ORIGIN_CPU); | ||
1871 | 1879 | ||
1872 | unref: | 1880 | unref: |
1873 | drm_gem_object_unreference(&obj->base); | 1881 | drm_gem_object_unreference(&obj->base); |
@@ -1974,6 +1982,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1974 | else | 1982 | else |
1975 | addr = -ENOMEM; | 1983 | addr = -ENOMEM; |
1976 | up_write(&mm->mmap_sem); | 1984 | up_write(&mm->mmap_sem); |
1985 | |||
1986 | /* This may race, but that's ok, it only gets set */ | ||
1987 | WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true); | ||
1977 | } | 1988 | } |
1978 | drm_gem_object_unreference_unlocked(obj); | 1989 | drm_gem_object_unreference_unlocked(obj); |
1979 | if (IS_ERR((void *)addr)) | 1990 | if (IS_ERR((void *)addr)) |
@@ -2262,7 +2273,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, | |||
2262 | 2273 | ||
2263 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | 2274 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) |
2264 | { | 2275 | { |
2265 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2276 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
2266 | int ret; | 2277 | int ret; |
2267 | 2278 | ||
2268 | dev_priv->mm.shrinker_no_lock_stealing = true; | 2279 | dev_priv->mm.shrinker_no_lock_stealing = true; |
@@ -2478,7 +2489,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | |||
2478 | static int | 2489 | static int |
2479 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | 2490 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
2480 | { | 2491 | { |
2481 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2492 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
2482 | int page_count, i; | 2493 | int page_count, i; |
2483 | struct address_space *mapping; | 2494 | struct address_space *mapping; |
2484 | struct sg_table *st; | 2495 | struct sg_table *st; |
@@ -2609,7 +2620,7 @@ err_pages: | |||
2609 | int | 2620 | int |
2610 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | 2621 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) |
2611 | { | 2622 | { |
2612 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2623 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
2613 | const struct drm_i915_gem_object_ops *ops = obj->ops; | 2624 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
2614 | int ret; | 2625 | int ret; |
2615 | 2626 | ||
@@ -2773,6 +2784,13 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) | |||
2773 | } | 2784 | } |
2774 | i915_gem_retire_requests(dev_priv); | 2785 | i915_gem_retire_requests(dev_priv); |
2775 | 2786 | ||
2787 | /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ | ||
2788 | if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) { | ||
2789 | while (intel_kick_waiters(dev_priv) || | ||
2790 | intel_kick_signalers(dev_priv)) | ||
2791 | yield(); | ||
2792 | } | ||
2793 | |||
2776 | /* Finally reset hw state */ | 2794 | /* Finally reset hw state */ |
2777 | for_each_engine(engine, dev_priv) | 2795 | for_each_engine(engine, dev_priv) |
2778 | intel_ring_init_seqno(engine, seqno); | 2796 | intel_ring_init_seqno(engine, seqno); |
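
The block added above only kicks the waiters and signalers when the requested seqno would appear to lie in the past, which is what i915_seqno_passed() detects across a 32-bit wrap. The helper's exact definition lives in the driver headers; the sketch below shows the usual wrap-safe form such comparisons take, with illustrative test values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "has seq1 reached seq2?": the unsigned subtraction wraps, and
 * reinterpreting it as signed treats anything within 2^31 behind as
 * "not passed".
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(10, 10));
        assert(seqno_passed(11, 10));
        assert(!seqno_passed(9, 10));
        /* Across the 32-bit wrap: 5 is "after" 0xfffffffb. */
        assert(seqno_passed(5, 0xfffffffbu));
        return 0;
}
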
@@ -2782,7 +2800,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) | |||
2782 | 2800 | ||
2783 | int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) | 2801 | int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) |
2784 | { | 2802 | { |
2785 | struct drm_i915_private *dev_priv = dev->dev_private; | 2803 | struct drm_i915_private *dev_priv = to_i915(dev); |
2786 | int ret; | 2804 | int ret; |
2787 | 2805 | ||
2788 | if (seqno == 0) | 2806 | if (seqno == 0) |
@@ -2822,6 +2840,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) | |||
2822 | return 0; | 2840 | return 0; |
2823 | } | 2841 | } |
2824 | 2842 | ||
2843 | static void i915_gem_mark_busy(const struct intel_engine_cs *engine) | ||
2844 | { | ||
2845 | struct drm_i915_private *dev_priv = engine->i915; | ||
2846 | |||
2847 | dev_priv->gt.active_engines |= intel_engine_flag(engine); | ||
2848 | if (dev_priv->gt.awake) | ||
2849 | return; | ||
2850 | |||
2851 | intel_runtime_pm_get_noresume(dev_priv); | ||
2852 | dev_priv->gt.awake = true; | ||
2853 | |||
2854 | i915_update_gfx_val(dev_priv); | ||
2855 | if (INTEL_GEN(dev_priv) >= 6) | ||
2856 | gen6_rps_busy(dev_priv); | ||
2857 | |||
2858 | queue_delayed_work(dev_priv->wq, | ||
2859 | &dev_priv->gt.retire_work, | ||
2860 | round_jiffies_up_relative(HZ)); | ||
2861 | } | ||
2862 | |||
2825 | /* | 2863 | /* |
2826 | * NB: This function is not allowed to fail. Doing so would mean the the | 2864 | * NB: This function is not allowed to fail. Doing so would mean the the |
2827 | * request is not being tracked for completion but the work itself is | 2865 | * request is not being tracked for completion but the work itself is |
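
i915_gem_mark_busy(), added above, becomes the single idle-to-busy transition point: it records the engine in gt.active_engines, takes a runtime-PM wakeref only on the first transition, and arms the retire worker. A compressed sketch of that pattern, assuming one big lock serialises callers (as struct_mutex does for __i915_add_request) and with the PM and workqueue helpers stubbed out as hypotheticals:

#include <stdbool.h>

/* Hypothetical device state; every field is protected by the same big
 * lock, mirroring how gt.awake/gt.active_engines sit under struct_mutex.
 */
struct device {
        unsigned int active_engines;    /* bitmask of engines with requests */
        bool awake;                     /* holds a wakeref while set */
};

static void take_wakeref(struct device *dev)       { /* runtime-PM get */ }
static void arm_retire_worker(struct device *dev)  { /* delayed work */ }

static void mark_busy(struct device *dev, unsigned int engine_bit)
{
        dev->active_engines |= engine_bit;
        if (dev->awake)
                return;                 /* already holding the wakeref */

        take_wakeref(dev);
        dev->awake = true;
        arm_retire_worker(dev);
}
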
@@ -2832,7 +2870,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2832 | bool flush_caches) | 2870 | bool flush_caches) |
2833 | { | 2871 | { |
2834 | struct intel_engine_cs *engine; | 2872 | struct intel_engine_cs *engine; |
2835 | struct drm_i915_private *dev_priv; | ||
2836 | struct intel_ringbuffer *ringbuf; | 2873 | struct intel_ringbuffer *ringbuf; |
2837 | u32 request_start; | 2874 | u32 request_start; |
2838 | u32 reserved_tail; | 2875 | u32 reserved_tail; |
@@ -2842,7 +2879,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2842 | return; | 2879 | return; |
2843 | 2880 | ||
2844 | engine = request->engine; | 2881 | engine = request->engine; |
2845 | dev_priv = request->i915; | ||
2846 | ringbuf = request->ringbuf; | 2882 | ringbuf = request->ringbuf; |
2847 | 2883 | ||
2848 | /* | 2884 | /* |
@@ -2908,14 +2944,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2908 | } | 2944 | } |
2909 | /* Not allowed to fail! */ | 2945 | /* Not allowed to fail! */ |
2910 | WARN(ret, "emit|add_request failed: %d!\n", ret); | 2946 | WARN(ret, "emit|add_request failed: %d!\n", ret); |
2911 | |||
2912 | i915_queue_hangcheck(engine->i915); | ||
2913 | |||
2914 | queue_delayed_work(dev_priv->wq, | ||
2915 | &dev_priv->mm.retire_work, | ||
2916 | round_jiffies_up_relative(HZ)); | ||
2917 | intel_mark_busy(dev_priv); | ||
2918 | |||
2919 | /* Sanity check that the reserved size was large enough. */ | 2947 | /* Sanity check that the reserved size was large enough. */ |
2920 | ret = intel_ring_get_tail(ringbuf) - request_start; | 2948 | ret = intel_ring_get_tail(ringbuf) - request_start; |
2921 | if (ret < 0) | 2949 | if (ret < 0) |
@@ -2924,46 +2952,34 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2924 | "Not enough space reserved (%d bytes) " | 2952 | "Not enough space reserved (%d bytes) " |
2925 | "for adding the request (%d bytes)\n", | 2953 | "for adding the request (%d bytes)\n", |
2926 | reserved_tail, ret); | 2954 | reserved_tail, ret); |
2955 | |||
2956 | i915_gem_mark_busy(engine); | ||
2927 | } | 2957 | } |
2928 | 2958 | ||
2929 | static bool i915_context_is_banned(struct drm_i915_private *dev_priv, | 2959 | static bool i915_context_is_banned(const struct i915_gem_context *ctx) |
2930 | const struct i915_gem_context *ctx) | ||
2931 | { | 2960 | { |
2932 | unsigned long elapsed; | 2961 | unsigned long elapsed; |
2933 | 2962 | ||
2934 | elapsed = get_seconds() - ctx->hang_stats.guilty_ts; | ||
2935 | |||
2936 | if (ctx->hang_stats.banned) | 2963 | if (ctx->hang_stats.banned) |
2937 | return true; | 2964 | return true; |
2938 | 2965 | ||
2966 | elapsed = get_seconds() - ctx->hang_stats.guilty_ts; | ||
2939 | if (ctx->hang_stats.ban_period_seconds && | 2967 | if (ctx->hang_stats.ban_period_seconds && |
2940 | elapsed <= ctx->hang_stats.ban_period_seconds) { | 2968 | elapsed <= ctx->hang_stats.ban_period_seconds) { |
2941 | if (!i915_gem_context_is_default(ctx)) { | 2969 | DRM_DEBUG("context hanging too fast, banning!\n"); |
2942 | DRM_DEBUG("context hanging too fast, banning!\n"); | 2970 | return true; |
2943 | return true; | ||
2944 | } else if (i915_stop_ring_allow_ban(dev_priv)) { | ||
2945 | if (i915_stop_ring_allow_warn(dev_priv)) | ||
2946 | DRM_ERROR("gpu hanging too fast, banning!\n"); | ||
2947 | return true; | ||
2948 | } | ||
2949 | } | 2971 | } |
2950 | 2972 | ||
2951 | return false; | 2973 | return false; |
2952 | } | 2974 | } |
2953 | 2975 | ||
2954 | static void i915_set_reset_status(struct drm_i915_private *dev_priv, | 2976 | static void i915_set_reset_status(struct i915_gem_context *ctx, |
2955 | struct i915_gem_context *ctx, | ||
2956 | const bool guilty) | 2977 | const bool guilty) |
2957 | { | 2978 | { |
2958 | struct i915_ctx_hang_stats *hs; | 2979 | struct i915_ctx_hang_stats *hs = &ctx->hang_stats; |
2959 | |||
2960 | if (WARN_ON(!ctx)) | ||
2961 | return; | ||
2962 | |||
2963 | hs = &ctx->hang_stats; | ||
2964 | 2980 | ||
2965 | if (guilty) { | 2981 | if (guilty) { |
2966 | hs->banned = i915_context_is_banned(dev_priv, ctx); | 2982 | hs->banned = i915_context_is_banned(ctx); |
2967 | hs->batch_active++; | 2983 | hs->batch_active++; |
2968 | hs->guilty_ts = get_seconds(); | 2984 | hs->guilty_ts = get_seconds(); |
2969 | } else { | 2985 | } else { |
@@ -3012,7 +3028,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
3012 | kref_init(&req->ref); | 3028 | kref_init(&req->ref); |
3013 | req->i915 = dev_priv; | 3029 | req->i915 = dev_priv; |
3014 | req->engine = engine; | 3030 | req->engine = engine; |
3015 | req->reset_counter = reset_counter; | ||
3016 | req->ctx = ctx; | 3031 | req->ctx = ctx; |
3017 | i915_gem_context_reference(req->ctx); | 3032 | i915_gem_context_reference(req->ctx); |
3018 | 3033 | ||
@@ -3072,8 +3087,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) | |||
3072 | { | 3087 | { |
3073 | struct drm_i915_gem_request *request; | 3088 | struct drm_i915_gem_request *request; |
3074 | 3089 | ||
3090 | /* We are called by the error capture and reset at a random | ||
3091 | * point in time. In particular, note that neither is crucially | ||
3092 | * ordered with an interrupt. After a hang, the GPU is dead and we | ||
3093 | * assume that no more writes can happen (we waited long enough for | ||
3094 | * all writes that were in transaction to be flushed) - adding an | ||
3095 | * extra delay for a recent interrupt is pointless. Hence, we do | ||
3096 | * not need an engine->irq_seqno_barrier() before the seqno reads. | ||
3097 | */ | ||
3075 | list_for_each_entry(request, &engine->request_list, list) { | 3098 | list_for_each_entry(request, &engine->request_list, list) { |
3076 | if (i915_gem_request_completed(request, false)) | 3099 | if (i915_gem_request_completed(request)) |
3077 | continue; | 3100 | continue; |
3078 | 3101 | ||
3079 | return request; | 3102 | return request; |
@@ -3082,27 +3105,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) | |||
3082 | return NULL; | 3105 | return NULL; |
3083 | } | 3106 | } |
3084 | 3107 | ||
3085 | static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv, | 3108 | static void i915_gem_reset_engine_status(struct intel_engine_cs *engine) |
3086 | struct intel_engine_cs *engine) | ||
3087 | { | 3109 | { |
3088 | struct drm_i915_gem_request *request; | 3110 | struct drm_i915_gem_request *request; |
3089 | bool ring_hung; | 3111 | bool ring_hung; |
3090 | 3112 | ||
3091 | request = i915_gem_find_active_request(engine); | 3113 | request = i915_gem_find_active_request(engine); |
3092 | |||
3093 | if (request == NULL) | 3114 | if (request == NULL) |
3094 | return; | 3115 | return; |
3095 | 3116 | ||
3096 | ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; | 3117 | ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; |
3097 | 3118 | ||
3098 | i915_set_reset_status(dev_priv, request->ctx, ring_hung); | 3119 | i915_set_reset_status(request->ctx, ring_hung); |
3099 | |||
3100 | list_for_each_entry_continue(request, &engine->request_list, list) | 3120 | list_for_each_entry_continue(request, &engine->request_list, list) |
3101 | i915_set_reset_status(dev_priv, request->ctx, false); | 3121 | i915_set_reset_status(request->ctx, false); |
3102 | } | 3122 | } |
3103 | 3123 | ||
3104 | static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, | 3124 | static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) |
3105 | struct intel_engine_cs *engine) | ||
3106 | { | 3125 | { |
3107 | struct intel_ringbuffer *buffer; | 3126 | struct intel_ringbuffer *buffer; |
3108 | 3127 | ||
@@ -3163,7 +3182,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, | |||
3163 | 3182 | ||
3164 | void i915_gem_reset(struct drm_device *dev) | 3183 | void i915_gem_reset(struct drm_device *dev) |
3165 | { | 3184 | { |
3166 | struct drm_i915_private *dev_priv = dev->dev_private; | 3185 | struct drm_i915_private *dev_priv = to_i915(dev); |
3167 | struct intel_engine_cs *engine; | 3186 | struct intel_engine_cs *engine; |
3168 | 3187 | ||
3169 | /* | 3188 | /* |
@@ -3172,10 +3191,10 @@ void i915_gem_reset(struct drm_device *dev) | |||
3172 | * their reference to the objects, the inspection must be done first. | 3191 | * their reference to the objects, the inspection must be done first. |
3173 | */ | 3192 | */ |
3174 | for_each_engine(engine, dev_priv) | 3193 | for_each_engine(engine, dev_priv) |
3175 | i915_gem_reset_engine_status(dev_priv, engine); | 3194 | i915_gem_reset_engine_status(engine); |
3176 | 3195 | ||
3177 | for_each_engine(engine, dev_priv) | 3196 | for_each_engine(engine, dev_priv) |
3178 | i915_gem_reset_engine_cleanup(dev_priv, engine); | 3197 | i915_gem_reset_engine_cleanup(engine); |
3179 | 3198 | ||
3180 | i915_gem_context_reset(dev); | 3199 | i915_gem_context_reset(dev); |
3181 | 3200 | ||
@@ -3205,7 +3224,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) | |||
3205 | struct drm_i915_gem_request, | 3224 | struct drm_i915_gem_request, |
3206 | list); | 3225 | list); |
3207 | 3226 | ||
3208 | if (!i915_gem_request_completed(request, true)) | 3227 | if (!i915_gem_request_completed(request)) |
3209 | break; | 3228 | break; |
3210 | 3229 | ||
3211 | i915_gem_request_retire(request); | 3230 | i915_gem_request_retire(request); |
@@ -3228,55 +3247,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) | |||
3228 | i915_gem_object_retire__read(obj, engine->id); | 3247 | i915_gem_object_retire__read(obj, engine->id); |
3229 | } | 3248 | } |
3230 | 3249 | ||
3231 | if (unlikely(engine->trace_irq_req && | ||
3232 | i915_gem_request_completed(engine->trace_irq_req, true))) { | ||
3233 | engine->irq_put(engine); | ||
3234 | i915_gem_request_assign(&engine->trace_irq_req, NULL); | ||
3235 | } | ||
3236 | |||
3237 | WARN_ON(i915_verify_lists(engine->dev)); | 3250 | WARN_ON(i915_verify_lists(engine->dev)); |
3238 | } | 3251 | } |
3239 | 3252 | ||
3240 | bool | 3253 | void i915_gem_retire_requests(struct drm_i915_private *dev_priv) |
3241 | i915_gem_retire_requests(struct drm_i915_private *dev_priv) | ||
3242 | { | 3254 | { |
3243 | struct intel_engine_cs *engine; | 3255 | struct intel_engine_cs *engine; |
3244 | bool idle = true; | 3256 | |
3257 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | ||
3258 | |||
3259 | if (dev_priv->gt.active_engines == 0) | ||
3260 | return; | ||
3261 | |||
3262 | GEM_BUG_ON(!dev_priv->gt.awake); | ||
3245 | 3263 | ||
3246 | for_each_engine(engine, dev_priv) { | 3264 | for_each_engine(engine, dev_priv) { |
3247 | i915_gem_retire_requests_ring(engine); | 3265 | i915_gem_retire_requests_ring(engine); |
3248 | idle &= list_empty(&engine->request_list); | 3266 | if (list_empty(&engine->request_list)) |
3249 | if (i915.enable_execlists) { | 3267 | dev_priv->gt.active_engines &= ~intel_engine_flag(engine); |
3250 | spin_lock_bh(&engine->execlist_lock); | ||
3251 | idle &= list_empty(&engine->execlist_queue); | ||
3252 | spin_unlock_bh(&engine->execlist_lock); | ||
3253 | } | ||
3254 | } | 3268 | } |
3255 | 3269 | ||
3256 | if (idle) | 3270 | if (dev_priv->gt.active_engines == 0) |
3257 | mod_delayed_work(dev_priv->wq, | 3271 | queue_delayed_work(dev_priv->wq, |
3258 | &dev_priv->mm.idle_work, | 3272 | &dev_priv->gt.idle_work, |
3259 | msecs_to_jiffies(100)); | 3273 | msecs_to_jiffies(100)); |
3260 | |||
3261 | return idle; | ||
3262 | } | 3274 | } |
3263 | 3275 | ||
3264 | static void | 3276 | static void |
3265 | i915_gem_retire_work_handler(struct work_struct *work) | 3277 | i915_gem_retire_work_handler(struct work_struct *work) |
3266 | { | 3278 | { |
3267 | struct drm_i915_private *dev_priv = | 3279 | struct drm_i915_private *dev_priv = |
3268 | container_of(work, typeof(*dev_priv), mm.retire_work.work); | 3280 | container_of(work, typeof(*dev_priv), gt.retire_work.work); |
3269 | struct drm_device *dev = dev_priv->dev; | 3281 | struct drm_device *dev = &dev_priv->drm; |
3270 | bool idle; | ||
3271 | 3282 | ||
3272 | /* Come back later if the device is busy... */ | 3283 | /* Come back later if the device is busy... */ |
3273 | idle = false; | ||
3274 | if (mutex_trylock(&dev->struct_mutex)) { | 3284 | if (mutex_trylock(&dev->struct_mutex)) { |
3275 | idle = i915_gem_retire_requests(dev_priv); | 3285 | i915_gem_retire_requests(dev_priv); |
3276 | mutex_unlock(&dev->struct_mutex); | 3286 | mutex_unlock(&dev->struct_mutex); |
3277 | } | 3287 | } |
3278 | if (!idle) | 3288 | |
3279 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, | 3289 | /* Keep the retire handler running until we are finally idle. |
3290 | * We do not need to do this test under locking as in the worst-case | ||
3291 | * we queue the retire worker once too often. | ||
3292 | */ | ||
3293 | if (READ_ONCE(dev_priv->gt.awake)) | ||
3294 | queue_delayed_work(dev_priv->wq, | ||
3295 | &dev_priv->gt.retire_work, | ||
3280 | round_jiffies_up_relative(HZ)); | 3296 | round_jiffies_up_relative(HZ)); |
3281 | } | 3297 | } |
3282 | 3298 | ||
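
The retire path above replaces the old per-ring idle bookkeeping with the gt.active_engines bitmask: an engine's bit is cleared once its request list drains, the idle worker is queued when the mask reaches zero, and the retire worker keeps re-queuing itself while gt.awake is set. A sketch of that bitmask bookkeeping with hypothetical types and helpers:

#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 5

struct gt_state {
        unsigned int active_engines;            /* one bit per busy engine */
        bool         request_list_empty[NUM_ENGINES];
};

static void queue_idle_work(void) { puts("idle work queued"); }

static void retire_requests(struct gt_state *gt)
{
        if (gt->active_engines == 0)
                return;                         /* nothing outstanding */

        for (int id = 0; id < NUM_ENGINES; id++) {
                /* ... retire completed requests for engine 'id' here ... */
                if (gt->request_list_empty[id])
                        gt->active_engines &= ~(1u << id);
        }

        if (gt->active_engines == 0)
                queue_idle_work();
}
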
@@ -3284,25 +3300,55 @@ static void | |||
3284 | i915_gem_idle_work_handler(struct work_struct *work) | 3300 | i915_gem_idle_work_handler(struct work_struct *work) |
3285 | { | 3301 | { |
3286 | struct drm_i915_private *dev_priv = | 3302 | struct drm_i915_private *dev_priv = |
3287 | container_of(work, typeof(*dev_priv), mm.idle_work.work); | 3303 | container_of(work, typeof(*dev_priv), gt.idle_work.work); |
3288 | struct drm_device *dev = dev_priv->dev; | 3304 | struct drm_device *dev = &dev_priv->drm; |
3289 | struct intel_engine_cs *engine; | 3305 | struct intel_engine_cs *engine; |
3306 | unsigned int stuck_engines; | ||
3307 | bool rearm_hangcheck; | ||
3308 | |||
3309 | if (!READ_ONCE(dev_priv->gt.awake)) | ||
3310 | return; | ||
3311 | |||
3312 | if (READ_ONCE(dev_priv->gt.active_engines)) | ||
3313 | return; | ||
3314 | |||
3315 | rearm_hangcheck = | ||
3316 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | ||
3317 | |||
3318 | if (!mutex_trylock(&dev->struct_mutex)) { | ||
3319 | /* Currently busy, come back later */ | ||
3320 | mod_delayed_work(dev_priv->wq, | ||
3321 | &dev_priv->gt.idle_work, | ||
3322 | msecs_to_jiffies(50)); | ||
3323 | goto out_rearm; | ||
3324 | } | ||
3325 | |||
3326 | if (dev_priv->gt.active_engines) | ||
3327 | goto out_unlock; | ||
3290 | 3328 | ||
3291 | for_each_engine(engine, dev_priv) | 3329 | for_each_engine(engine, dev_priv) |
3292 | if (!list_empty(&engine->request_list)) | 3330 | i915_gem_batch_pool_fini(&engine->batch_pool); |
3293 | return; | ||
3294 | 3331 | ||
3295 | /* we probably should sync with hangcheck here, using cancel_work_sync. | 3332 | GEM_BUG_ON(!dev_priv->gt.awake); |
3296 | * Also locking seems to be fubar here, engine->request_list is protected | 3333 | dev_priv->gt.awake = false; |
3297 | * by dev->struct_mutex. */ | 3334 | rearm_hangcheck = false; |
3298 | 3335 | ||
3299 | intel_mark_idle(dev_priv); | 3336 | stuck_engines = intel_kick_waiters(dev_priv); |
3337 | if (unlikely(stuck_engines)) { | ||
3338 | DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n"); | ||
3339 | dev_priv->gpu_error.missed_irq_rings |= stuck_engines; | ||
3340 | } | ||
3300 | 3341 | ||
3301 | if (mutex_trylock(&dev->struct_mutex)) { | 3342 | if (INTEL_GEN(dev_priv) >= 6) |
3302 | for_each_engine(engine, dev_priv) | 3343 | gen6_rps_idle(dev_priv); |
3303 | i915_gem_batch_pool_fini(&engine->batch_pool); | 3344 | intel_runtime_pm_put(dev_priv); |
3345 | out_unlock: | ||
3346 | mutex_unlock(&dev->struct_mutex); | ||
3304 | 3347 | ||
3305 | mutex_unlock(&dev->struct_mutex); | 3348 | out_rearm: |
3349 | if (rearm_hangcheck) { | ||
3350 | GEM_BUG_ON(!dev_priv->gt.awake); | ||
3351 | i915_queue_hangcheck(dev_priv); | ||
3306 | } | 3352 | } |
3307 | } | 3353 | } |
3308 | 3354 | ||
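
The new idle worker above follows a trylock-and-recheck discipline: cheap unlocked early-outs on gt.awake and gt.active_engines, cancel hangcheck before taking the lock, re-check under the lock, and re-arm hangcheck if it could not actually park. A compressed userspace sketch of that flow, with a pthread mutex standing in for struct_mutex and every helper hypothetical:

#include <pthread.h>
#include <stdbool.h>

struct gt_state {
        pthread_mutex_t lock;           /* stands in for struct_mutex */
        unsigned int    active_engines;
        bool            awake;
};

static bool cancel_hangcheck(void)      { return true; } /* was it armed? */
static void rearm_hangcheck(void)       { }
static void retry_idle_work_later(void) { }
static void park_hardware(struct gt_state *gt) { gt->awake = false; }

static void idle_work(struct gt_state *gt)
{
        bool rearm;

        /* Unlocked checks are racy, but a stale read only delays parking. */
        if (!gt->awake || gt->active_engines)
                return;

        rearm = cancel_hangcheck();

        if (pthread_mutex_trylock(&gt->lock) != 0) {
                retry_idle_work_later();        /* currently busy */
                goto out_rearm;
        }

        if (gt->active_engines)
                goto out_unlock;                /* raced with a new request */

        park_hardware(gt);
        rearm = false;

out_unlock:
        pthread_mutex_unlock(&gt->lock);
out_rearm:
        if (rearm)
                rearm_hangcheck();
}
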
@@ -3327,7 +3373,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | |||
3327 | if (req == NULL) | 3373 | if (req == NULL) |
3328 | continue; | 3374 | continue; |
3329 | 3375 | ||
3330 | if (i915_gem_request_completed(req, true)) | 3376 | if (i915_gem_request_completed(req)) |
3331 | i915_gem_object_retire__read(obj, i); | 3377 | i915_gem_object_retire__read(obj, i); |
3332 | } | 3378 | } |
3333 | 3379 | ||
@@ -3435,7 +3481,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
3435 | if (to == from) | 3481 | if (to == from) |
3436 | return 0; | 3482 | return 0; |
3437 | 3483 | ||
3438 | if (i915_gem_request_completed(from_req, true)) | 3484 | if (i915_gem_request_completed(from_req)) |
3439 | return 0; | 3485 | return 0; |
3440 | 3486 | ||
3441 | if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { | 3487 | if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { |
@@ -3586,7 +3632,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma) | |||
3586 | static int __i915_vma_unbind(struct i915_vma *vma, bool wait) | 3632 | static int __i915_vma_unbind(struct i915_vma *vma, bool wait) |
3587 | { | 3633 | { |
3588 | struct drm_i915_gem_object *obj = vma->obj; | 3634 | struct drm_i915_gem_object *obj = vma->obj; |
3589 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 3635 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
3590 | int ret; | 3636 | int ret; |
3591 | 3637 | ||
3592 | if (list_empty(&vma->obj_link)) | 3638 | if (list_empty(&vma->obj_link)) |
@@ -3662,26 +3708,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma) | |||
3662 | return __i915_vma_unbind(vma, false); | 3708 | return __i915_vma_unbind(vma, false); |
3663 | } | 3709 | } |
3664 | 3710 | ||
3665 | int i915_gpu_idle(struct drm_device *dev) | 3711 | int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv) |
3666 | { | 3712 | { |
3667 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3668 | struct intel_engine_cs *engine; | 3713 | struct intel_engine_cs *engine; |
3669 | int ret; | 3714 | int ret; |
3670 | 3715 | ||
3671 | /* Flush everything onto the inactive list. */ | 3716 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
3672 | for_each_engine(engine, dev_priv) { | ||
3673 | if (!i915.enable_execlists) { | ||
3674 | struct drm_i915_gem_request *req; | ||
3675 | 3717 | ||
3676 | req = i915_gem_request_alloc(engine, NULL); | 3718 | for_each_engine(engine, dev_priv) { |
3677 | if (IS_ERR(req)) | 3719 | if (engine->last_context == NULL) |
3678 | return PTR_ERR(req); | 3720 | continue; |
3679 | |||
3680 | ret = i915_switch_context(req); | ||
3681 | i915_add_request_no_flush(req); | ||
3682 | if (ret) | ||
3683 | return ret; | ||
3684 | } | ||
3685 | 3721 | ||
3686 | ret = intel_engine_idle(engine); | 3722 | ret = intel_engine_idle(engine); |
3687 | if (ret) | 3723 | if (ret) |
@@ -4214,7 +4250,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, | |||
4214 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | 4250 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
4215 | struct drm_file *file) | 4251 | struct drm_file *file) |
4216 | { | 4252 | { |
4217 | struct drm_i915_private *dev_priv = dev->dev_private; | 4253 | struct drm_i915_private *dev_priv = to_i915(dev); |
4218 | struct drm_i915_gem_caching *args = data; | 4254 | struct drm_i915_gem_caching *args = data; |
4219 | struct drm_i915_gem_object *obj; | 4255 | struct drm_i915_gem_object *obj; |
4220 | enum i915_cache_level level; | 4256 | enum i915_cache_level level; |
@@ -4408,7 +4444,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
4408 | static int | 4444 | static int |
4409 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | 4445 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) |
4410 | { | 4446 | { |
4411 | struct drm_i915_private *dev_priv = dev->dev_private; | 4447 | struct drm_i915_private *dev_priv = to_i915(dev); |
4412 | struct drm_i915_file_private *file_priv = file->driver_priv; | 4448 | struct drm_i915_file_private *file_priv = file->driver_priv; |
4413 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; | 4449 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; |
4414 | struct drm_i915_gem_request *request, *target = NULL; | 4450 | struct drm_i915_gem_request *request, *target = NULL; |
@@ -4444,9 +4480,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4444 | return 0; | 4480 | return 0; |
4445 | 4481 | ||
4446 | ret = __i915_wait_request(target, true, NULL, NULL); | 4482 | ret = __i915_wait_request(target, true, NULL, NULL); |
4447 | if (ret == 0) | ||
4448 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | ||
4449 | |||
4450 | i915_gem_request_unreference(target); | 4483 | i915_gem_request_unreference(target); |
4451 | 4484 | ||
4452 | return ret; | 4485 | return ret; |
@@ -4505,7 +4538,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, | |||
4505 | uint32_t alignment, | 4538 | uint32_t alignment, |
4506 | uint64_t flags) | 4539 | uint64_t flags) |
4507 | { | 4540 | { |
4508 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 4541 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
4509 | struct i915_vma *vma; | 4542 | struct i915_vma *vma; |
4510 | unsigned bound; | 4543 | unsigned bound; |
4511 | int ret; | 4544 | int ret; |
@@ -4669,7 +4702,7 @@ int | |||
4669 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | 4702 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
4670 | struct drm_file *file_priv) | 4703 | struct drm_file *file_priv) |
4671 | { | 4704 | { |
4672 | struct drm_i915_private *dev_priv = dev->dev_private; | 4705 | struct drm_i915_private *dev_priv = to_i915(dev); |
4673 | struct drm_i915_gem_madvise *args = data; | 4706 | struct drm_i915_gem_madvise *args = data; |
4674 | struct drm_i915_gem_object *obj; | 4707 | struct drm_i915_gem_object *obj; |
4675 | int ret; | 4708 | int ret; |
@@ -4739,7 +4772,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
4739 | obj->fence_reg = I915_FENCE_REG_NONE; | 4772 | obj->fence_reg = I915_FENCE_REG_NONE; |
4740 | obj->madv = I915_MADV_WILLNEED; | 4773 | obj->madv = I915_MADV_WILLNEED; |
4741 | 4774 | ||
4742 | i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); | 4775 | i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); |
4743 | } | 4776 | } |
4744 | 4777 | ||
4745 | static const struct drm_i915_gem_object_ops i915_gem_object_ops = { | 4778 | static const struct drm_i915_gem_object_ops i915_gem_object_ops = { |
@@ -4834,7 +4867,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
4834 | { | 4867 | { |
4835 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); | 4868 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
4836 | struct drm_device *dev = obj->base.dev; | 4869 | struct drm_device *dev = obj->base.dev; |
4837 | struct drm_i915_private *dev_priv = dev->dev_private; | 4870 | struct drm_i915_private *dev_priv = to_i915(dev); |
4838 | struct i915_vma *vma, *next; | 4871 | struct i915_vma *vma, *next; |
4839 | 4872 | ||
4840 | intel_runtime_pm_get(dev_priv); | 4873 | intel_runtime_pm_get(dev_priv); |
@@ -4938,7 +4971,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma) | |||
4938 | static void | 4971 | static void |
4939 | i915_gem_stop_engines(struct drm_device *dev) | 4972 | i915_gem_stop_engines(struct drm_device *dev) |
4940 | { | 4973 | { |
4941 | struct drm_i915_private *dev_priv = dev->dev_private; | 4974 | struct drm_i915_private *dev_priv = to_i915(dev); |
4942 | struct intel_engine_cs *engine; | 4975 | struct intel_engine_cs *engine; |
4943 | 4976 | ||
4944 | for_each_engine(engine, dev_priv) | 4977 | for_each_engine(engine, dev_priv) |
@@ -4948,11 +4981,11 @@ i915_gem_stop_engines(struct drm_device *dev) | |||
4948 | int | 4981 | int |
4949 | i915_gem_suspend(struct drm_device *dev) | 4982 | i915_gem_suspend(struct drm_device *dev) |
4950 | { | 4983 | { |
4951 | struct drm_i915_private *dev_priv = dev->dev_private; | 4984 | struct drm_i915_private *dev_priv = to_i915(dev); |
4952 | int ret = 0; | 4985 | int ret = 0; |
4953 | 4986 | ||
4954 | mutex_lock(&dev->struct_mutex); | 4987 | mutex_lock(&dev->struct_mutex); |
4955 | ret = i915_gpu_idle(dev); | 4988 | ret = i915_gem_wait_for_idle(dev_priv); |
4956 | if (ret) | 4989 | if (ret) |
4957 | goto err; | 4990 | goto err; |
4958 | 4991 | ||
@@ -4963,13 +4996,13 @@ i915_gem_suspend(struct drm_device *dev) | |||
4963 | mutex_unlock(&dev->struct_mutex); | 4996 | mutex_unlock(&dev->struct_mutex); |
4964 | 4997 | ||
4965 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | 4998 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); |
4966 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | 4999 | cancel_delayed_work_sync(&dev_priv->gt.retire_work); |
4967 | flush_delayed_work(&dev_priv->mm.idle_work); | 5000 | flush_delayed_work(&dev_priv->gt.idle_work); |
4968 | 5001 | ||
4969 | /* Assert that we sucessfully flushed all the work and | 5002 | /* Assert that we sucessfully flushed all the work and |
4970 | * reset the GPU back to its idle, low power state. | 5003 | * reset the GPU back to its idle, low power state. |
4971 | */ | 5004 | */ |
4972 | WARN_ON(dev_priv->mm.busy); | 5005 | WARN_ON(dev_priv->gt.awake); |
4973 | 5006 | ||
4974 | return 0; | 5007 | return 0; |
4975 | 5008 | ||
@@ -4980,7 +5013,7 @@ err: | |||
4980 | 5013 | ||
4981 | void i915_gem_init_swizzling(struct drm_device *dev) | 5014 | void i915_gem_init_swizzling(struct drm_device *dev) |
4982 | { | 5015 | { |
4983 | struct drm_i915_private *dev_priv = dev->dev_private; | 5016 | struct drm_i915_private *dev_priv = to_i915(dev); |
4984 | 5017 | ||
4985 | if (INTEL_INFO(dev)->gen < 5 || | 5018 | if (INTEL_INFO(dev)->gen < 5 || |
4986 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) | 5019 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) |
@@ -5005,7 +5038,7 @@ void i915_gem_init_swizzling(struct drm_device *dev) | |||
5005 | 5038 | ||
5006 | static void init_unused_ring(struct drm_device *dev, u32 base) | 5039 | static void init_unused_ring(struct drm_device *dev, u32 base) |
5007 | { | 5040 | { |
5008 | struct drm_i915_private *dev_priv = dev->dev_private; | 5041 | struct drm_i915_private *dev_priv = to_i915(dev); |
5009 | 5042 | ||
5010 | I915_WRITE(RING_CTL(base), 0); | 5043 | I915_WRITE(RING_CTL(base), 0); |
5011 | I915_WRITE(RING_HEAD(base), 0); | 5044 | I915_WRITE(RING_HEAD(base), 0); |
@@ -5032,7 +5065,7 @@ static void init_unused_rings(struct drm_device *dev) | |||
5032 | 5065 | ||
5033 | int i915_gem_init_engines(struct drm_device *dev) | 5066 | int i915_gem_init_engines(struct drm_device *dev) |
5034 | { | 5067 | { |
5035 | struct drm_i915_private *dev_priv = dev->dev_private; | 5068 | struct drm_i915_private *dev_priv = to_i915(dev); |
5036 | int ret; | 5069 | int ret; |
5037 | 5070 | ||
5038 | ret = intel_init_render_ring_buffer(dev); | 5071 | ret = intel_init_render_ring_buffer(dev); |
@@ -5080,7 +5113,7 @@ cleanup_render_ring: | |||
5080 | int | 5113 | int |
5081 | i915_gem_init_hw(struct drm_device *dev) | 5114 | i915_gem_init_hw(struct drm_device *dev) |
5082 | { | 5115 | { |
5083 | struct drm_i915_private *dev_priv = dev->dev_private; | 5116 | struct drm_i915_private *dev_priv = to_i915(dev); |
5084 | struct intel_engine_cs *engine; | 5117 | struct intel_engine_cs *engine; |
5085 | int ret; | 5118 | int ret; |
5086 | 5119 | ||
@@ -5138,12 +5171,6 @@ i915_gem_init_hw(struct drm_device *dev) | |||
5138 | if (ret) | 5171 | if (ret) |
5139 | goto out; | 5172 | goto out; |
5140 | 5173 | ||
5141 | /* | ||
5142 | * Increment the next seqno by 0x100 so we have a visible break | ||
5143 | * on re-initialisation | ||
5144 | */ | ||
5145 | ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); | ||
5146 | |||
5147 | out: | 5174 | out: |
5148 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 5175 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
5149 | return ret; | 5176 | return ret; |
@@ -5151,7 +5178,7 @@ out: | |||
5151 | 5178 | ||
5152 | int i915_gem_init(struct drm_device *dev) | 5179 | int i915_gem_init(struct drm_device *dev) |
5153 | { | 5180 | { |
5154 | struct drm_i915_private *dev_priv = dev->dev_private; | 5181 | struct drm_i915_private *dev_priv = to_i915(dev); |
5155 | int ret; | 5182 | int ret; |
5156 | 5183 | ||
5157 | mutex_lock(&dev->struct_mutex); | 5184 | mutex_lock(&dev->struct_mutex); |
@@ -5208,7 +5235,7 @@ out_unlock: | |||
5208 | void | 5235 | void |
5209 | i915_gem_cleanup_engines(struct drm_device *dev) | 5236 | i915_gem_cleanup_engines(struct drm_device *dev) |
5210 | { | 5237 | { |
5211 | struct drm_i915_private *dev_priv = dev->dev_private; | 5238 | struct drm_i915_private *dev_priv = to_i915(dev); |
5212 | struct intel_engine_cs *engine; | 5239 | struct intel_engine_cs *engine; |
5213 | 5240 | ||
5214 | for_each_engine(engine, dev_priv) | 5241 | for_each_engine(engine, dev_priv) |
@@ -5225,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine) | |||
5225 | void | 5252 | void |
5226 | i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | 5253 | i915_gem_load_init_fences(struct drm_i915_private *dev_priv) |
5227 | { | 5254 | { |
5228 | struct drm_device *dev = dev_priv->dev; | 5255 | struct drm_device *dev = &dev_priv->drm; |
5229 | 5256 | ||
5230 | if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && | 5257 | if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && |
5231 | !IS_CHERRYVIEW(dev_priv)) | 5258 | !IS_CHERRYVIEW(dev_priv)) |
@@ -5249,7 +5276,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | |||
5249 | void | 5276 | void |
5250 | i915_gem_load_init(struct drm_device *dev) | 5277 | i915_gem_load_init(struct drm_device *dev) |
5251 | { | 5278 | { |
5252 | struct drm_i915_private *dev_priv = dev->dev_private; | 5279 | struct drm_i915_private *dev_priv = to_i915(dev); |
5253 | int i; | 5280 | int i; |
5254 | 5281 | ||
5255 | dev_priv->objects = | 5282 | dev_priv->objects = |
@@ -5277,22 +5304,15 @@ i915_gem_load_init(struct drm_device *dev) | |||
5277 | init_engine_lists(&dev_priv->engine[i]); | 5304 | init_engine_lists(&dev_priv->engine[i]); |
5278 | for (i = 0; i < I915_MAX_NUM_FENCES; i++) | 5305 | for (i = 0; i < I915_MAX_NUM_FENCES; i++) |
5279 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 5306 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
5280 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 5307 | INIT_DELAYED_WORK(&dev_priv->gt.retire_work, |
5281 | i915_gem_retire_work_handler); | 5308 | i915_gem_retire_work_handler); |
5282 | INIT_DELAYED_WORK(&dev_priv->mm.idle_work, | 5309 | INIT_DELAYED_WORK(&dev_priv->gt.idle_work, |
5283 | i915_gem_idle_work_handler); | 5310 | i915_gem_idle_work_handler); |
5311 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); | ||
5284 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); | 5312 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
5285 | 5313 | ||
5286 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | 5314 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; |
5287 | 5315 | ||
5288 | /* | ||
5289 | * Set initial sequence number for requests. | ||
5290 | * Using this number allows the wraparound to happen early, | ||
5291 | * catching any obvious problems. | ||
5292 | */ | ||
5293 | dev_priv->next_seqno = ((u32)~0 - 0x1100); | ||
5294 | dev_priv->last_seqno = ((u32)~0 - 0x1101); | ||
5295 | |||
5296 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 5316 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
5297 | 5317 | ||
5298 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 5318 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
@@ -5378,7 +5398,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) | |||
5378 | return -ENOMEM; | 5398 | return -ENOMEM; |
5379 | 5399 | ||
5380 | file->driver_priv = file_priv; | 5400 | file->driver_priv = file_priv; |
5381 | file_priv->dev_priv = dev->dev_private; | 5401 | file_priv->dev_priv = to_i915(dev); |
5382 | file_priv->file = file; | 5402 | file_priv->file = file; |
5383 | INIT_LIST_HEAD(&file_priv->rps.link); | 5403 | INIT_LIST_HEAD(&file_priv->rps.link); |
5384 | 5404 | ||
@@ -5424,7 +5444,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, | |||
5424 | u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, | 5444 | u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, |
5425 | struct i915_address_space *vm) | 5445 | struct i915_address_space *vm) |
5426 | { | 5446 | { |
5427 | struct drm_i915_private *dev_priv = o->base.dev->dev_private; | 5447 | struct drm_i915_private *dev_priv = to_i915(o->base.dev); |
5428 | struct i915_vma *vma; | 5448 | struct i915_vma *vma; |
5429 | 5449 | ||
5430 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); | 5450 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); |
@@ -5528,7 +5548,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) | |||
5528 | struct page *page; | 5548 | struct page *page; |
5529 | 5549 | ||
5530 | /* Only default objects have per-page dirty tracking */ | 5550 | /* Only default objects have per-page dirty tracking */ |
5531 | if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) | 5551 | if (WARN_ON(!i915_gem_object_has_struct_page(obj))) |
5532 | return NULL; | 5552 | return NULL; |
5533 | 5553 | ||
5534 | page = i915_gem_object_get_page(obj, n); | 5554 | page = i915_gem_object_get_page(obj, n); |