Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c   361
1 file changed, 253 insertions, 108 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc0..37427e4016cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -52,7 +48,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
-static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +563,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
+       bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +582,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Need a new fence register? */
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-           obj_priv->tiling_mode != I915_TILING_NONE)
-               i915_gem_object_get_fence_reg(obj);
+           obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj, write);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return VM_FAULT_SIGBUS;
+               }
+       }
 
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
@@ -601,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        case -EAGAIN:
                return VM_FAULT_OOM;
        case -EFAULT:
-       case -EBUSY:
-               DRM_ERROR("can't insert pfn?? fault or busy...\n");
                return VM_FAULT_SIGBUS;
        default:
                return VM_FAULT_NOPAGE;
@@ -678,6 +678,30 @@ out_free_list:
        return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map_list *list;
+
+       list = &obj->map_list;
+       drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+       if (list->file_offset_node) {
+               drm_mm_put_block(list->file_offset_node);
+               list->file_offset_node = NULL;
+       }
+
+       if (list->map) {
+               drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+               list->map = NULL;
+       }
+
+       obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -752,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
-               if (ret)
+               if (ret) {
+                       drm_gem_object_unreference(obj);
+                       mutex_unlock(&dev->struct_mutex);
                        return ret;
+               }
        }
 
        args->offset = obj_priv->mmap_offset;
@@ -1024,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
+       if (!dev_priv->hw_status_page)
+               return;
+
        seqno = i915_get_gem_seqno(dev);
 
        while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1211,7 +1241,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 /**
  * Unbinds an object from the GTT aperture.
  */
-static int
+int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
@@ -1445,21 +1475,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int regnum = obj_priv->fence_reg;
-       uint32_t val;
+       int tile_width;
+       uint32_t fence_reg, val;
        uint32_t pitch_val;
 
        if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
            (obj_priv->gtt_offset & (obj->size - 1))) {
-               WARN(1, "%s: object not 1M or size aligned\n", __func__);
+               WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
+                    __func__, obj_priv->gtt_offset, obj->size);
                return;
        }
 
-       if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
-                                                      IS_I945GM(dev) ||
-                                                      IS_G33(dev)))
-               pitch_val = (obj_priv->stride / 128) - 1;
+       if (obj_priv->tiling_mode == I915_TILING_Y &&
+           HAS_128_BYTE_Y_TILING(dev))
+               tile_width = 128;
        else
-               pitch_val = (obj_priv->stride / 512) - 1;
+               tile_width = 512;
+
+       /* Note: pitch better be a power of two tile widths */
+       pitch_val = obj_priv->stride / tile_width;
+       pitch_val = ffs(pitch_val) - 1;
 
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1468,7 +1503,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;
 
-       I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+       if (regnum < 8)
+               fence_reg = FENCE_REG_830_0 + (regnum * 4);
+       else
+               fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
+       I915_WRITE(fence_reg, val);
 }
 
 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -1483,7 +1522,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 
        if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
            (obj_priv->gtt_offset & (obj->size - 1))) {
-               WARN(1, "%s: object not 1M or size aligned\n", __func__);
+               WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+                    __func__, obj_priv->gtt_offset);
                return;
        }
 
@@ -1503,6 +1543,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
+ * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -1513,49 +1554,77 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static void
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_i915_fence_reg *reg = NULL;
-       int i, ret;
+       struct drm_i915_gem_object *old_obj_priv = NULL;
+       int i, ret, avail;
 
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
                break;
        case I915_TILING_X:
-               WARN(obj_priv->stride & (512 - 1),
-                    "object is X tiled but has non-512B pitch\n");
+               if (!obj_priv->stride)
+                       return -EINVAL;
+               WARN((obj_priv->stride & (512 - 1)),
+                    "object 0x%08x is X tiled but has non-512B pitch\n",
+                    obj_priv->gtt_offset);
                break;
        case I915_TILING_Y:
-               WARN(obj_priv->stride & (128 - 1),
-                    "object is Y tiled but has non-128B pitch\n");
+               if (!obj_priv->stride)
+                       return -EINVAL;
+               WARN((obj_priv->stride & (128 - 1)),
+                    "object 0x%08x is Y tiled but has non-128B pitch\n",
+                    obj_priv->gtt_offset);
                break;
        }
 
        /* First try to find a free reg */
+try_again:
+       avail = 0;
        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        break;
+
+               old_obj_priv = reg->obj->driver_private;
+               if (!old_obj_priv->pin_count)
+                       avail++;
        }
 
        /* None available, try to steal one or wait for a user to finish */
        if (i == dev_priv->num_fence_regs) {
-               struct drm_i915_gem_object *old_obj_priv = NULL;
+               uint32_t seqno = dev_priv->mm.next_gem_seqno;
                loff_t offset;
 
-try_again:
-               /* Could try to use LRU here instead... */
+               if (avail == 0)
+                       return -ENOMEM;
+
                for (i = dev_priv->fence_reg_start;
                     i < dev_priv->num_fence_regs; i++) {
+                       uint32_t this_seqno;
+
                        reg = &dev_priv->fence_regs[i];
                        old_obj_priv = reg->obj->driver_private;
-                       if (!old_obj_priv->pin_count)
+
+                       if (old_obj_priv->pin_count)
+                               continue;
+
+                       /* i915 uses fences for GPU access to tiled buffers */
+                       if (IS_I965G(dev) || !old_obj_priv->active)
                                break;
+
+                       /* find the seqno of the first available fence */
+                       this_seqno = old_obj_priv->last_rendering_seqno;
+                       if (this_seqno != 0 &&
+                           reg->obj->write_domain == 0 &&
+                           i915_seqno_passed(seqno, this_seqno))
+                               seqno = this_seqno;
                }
 
                /*
@@ -1563,14 +1632,25 @@ try_again:
                 * objects to finish before trying again.
                 */
                if (i == dev_priv->num_fence_regs) {
-                       ret = i915_gem_object_wait_rendering(reg->obj);
-                       if (ret) {
-                               WARN(ret, "wait_rendering failed: %d\n", ret);
-                               return;
+                       if (seqno == dev_priv->mm.next_gem_seqno) {
+                               i915_gem_flush(dev,
+                                              I915_GEM_GPU_DOMAINS,
+                                              I915_GEM_GPU_DOMAINS);
+                               seqno = i915_add_request(dev,
+                                                        I915_GEM_GPU_DOMAINS);
+                               if (seqno == 0)
+                                       return -ENOMEM;
                        }
+
+                       ret = i915_wait_request(dev, seqno);
+                       if (ret)
+                               return ret;
                        goto try_again;
                }
 
+               BUG_ON(old_obj_priv->active ||
+                      (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
+
                /*
                 * Zap this virtual mapping so we can set up a fence again
                 * for this object next time we need it.
@@ -1591,6 +1671,8 @@ try_again:
                i915_write_fence_reg(reg);
        else
                i830_write_fence_reg(reg);
+
+       return 0;
 }
 
 /**
@@ -1609,8 +1691,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
        if (IS_I965G(dev))
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       else
-               I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+       else {
+               uint32_t fence_reg;
+
+               if (obj_priv->fence_reg < 8)
+                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+               else
+                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
+                                                      8) * 4;
+
+               I915_WRITE(fence_reg, 0);
+       }
 
        dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
        obj_priv->fence_reg = I915_FENCE_REG_NONE;
@@ -1631,7 +1722,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        if (dev_priv->mm.suspended)
                return -EBUSY;
        if (alignment == 0)
-               alignment = PAGE_SIZE;
+               alignment = i915_gem_get_gtt_alignment(obj);
        if (alignment & (PAGE_SIZE - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
@@ -1974,30 +2065,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                 uint32_t read_domains,
-                                 uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        uint32_t invalidate_domains = 0;
        uint32_t flush_domains = 0;
 
-       BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-       BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+       BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+       BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 __func__, obj,
-                obj->read_domains, read_domains,
-                obj->write_domain, write_domain);
+                obj->read_domains, obj->pending_read_domains,
+                obj->write_domain, obj->pending_write_domain);
 #endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
-       if (write_domain == 0)
-               read_domains |= obj->read_domains;
+       if (obj->pending_write_domain == 0)
+               obj->pending_read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;
 
@@ -2007,15 +2096,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * any read domains which differ from the old
         * write domain
         */
-       if (obj->write_domain && obj->write_domain != read_domains) {
+       if (obj->write_domain &&
+           obj->write_domain != obj->pending_read_domains) {
                flush_domains |= obj->write_domain;
-               invalidate_domains |= read_domains & ~obj->write_domain;
+               invalidate_domains |=
+                       obj->pending_read_domains & ~obj->write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
-       invalidate_domains |= read_domains & ~obj->read_domains;
+       invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
                DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2024,9 +2115,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                i915_gem_clflush_object(obj);
        }
 
-       if ((write_domain | flush_domains) != 0)
-               obj->write_domain = write_domain;
-       obj->read_domains = read_domains;
+       /* The actual obj->write_domain will be updated with
+        * pending_write_domain after we emit the accumulated flush for all
+        * of our domain changes in execbuffers (which clears objects'
+        * write_domains). So if we have a current write domain that we
+        * aren't changing, set pending_write_domain to that.
+        */
+       if (flush_domains == 0 && obj->pending_write_domain == 0)
+               obj->pending_write_domain = obj->write_domain;
+       obj->read_domains = obj->pending_read_domains;
 
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
@@ -2229,6 +2326,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                  (int) reloc.offset,
                                  reloc.read_domains,
                                  reloc.write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
 
@@ -2415,6 +2514,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
+       struct drm_i915_gem_object *obj_priv;
        int ret, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains;
@@ -2458,13 +2558,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        if (dev_priv->mm.wedged) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);
-               return -EIO;
+               ret = -EIO;
+               goto pre_mutex_err;
        }
 
        if (dev_priv->mm.suspended) {
                DRM_ERROR("Execbuf while VT-switched.\n");
                mutex_unlock(&dev->struct_mutex);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto pre_mutex_err;
        }
 
        /* Look up object handles */
@@ -2477,6 +2579,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                        ret = -EBADF;
                        goto err;
                }
+
+               obj_priv = object_list[i]->driver_private;
+               if (obj_priv->in_execbuffer) {
+                       DRM_ERROR("Object %p appears more than once in object list\n",
+                                  object_list[i]);
+                       ret = -EBADF;
+                       goto err;
+               }
+               obj_priv->in_execbuffer = true;
        }
 
        /* Pin and relocate */
@@ -2532,9 +2643,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                struct drm_gem_object *obj = object_list[i];
 
                /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj,
-                                                 obj->pending_read_domains,
-                                                 obj->pending_write_domain);
+               i915_gem_object_set_to_gpu_domain(obj);
        }
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2553,6 +2662,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                (void)i915_add_request(dev, dev->flush_domains);
        }
 
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_gem_object *obj = object_list[i];
+
+               obj->write_domain = obj->pending_write_domain;
+       }
+
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2610,24 +2725,32 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
 
-       /* Copy the new buffer offsets back to the user's exec list. */
-       ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-                          (uintptr_t) args->buffers_ptr,
-                          exec_list,
-                          sizeof(*exec_list) * args->buffer_count);
-       if (ret)
-               DRM_ERROR("failed to copy %d exec entries "
-                         "back to user (%d)\n",
-                         args->buffer_count, ret);
 err:
        for (i = 0; i < pinned; i++)
                i915_gem_object_unpin(object_list[i]);
 
-       for (i = 0; i < args->buffer_count; i++)
+       for (i = 0; i < args->buffer_count; i++) {
+               if (object_list[i]) {
+                       obj_priv = object_list[i]->driver_private;
+                       obj_priv->in_execbuffer = false;
+               }
                drm_gem_object_unreference(object_list[i]);
+       }
 
        mutex_unlock(&dev->struct_mutex);
 
+       if (!ret) {
+               /* Copy the new buffer offsets back to the user's exec list. */
+               ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                  (uintptr_t) args->buffers_ptr,
+                                  exec_list,
+                                  sizeof(*exec_list) * args->buffer_count);
+               if (ret)
+                       DRM_ERROR("failed to copy %d exec entries "
+                                 "back to user (%d)\n",
+                                 args->buffer_count, ret);
+       }
+
 pre_mutex_err:
        drm_free(object_list, sizeof(*object_list) * args->buffer_count,
                 DRM_MEM_DRIVER);
@@ -2649,7 +2772,22 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
                if (ret != 0) {
                        if (ret != -EBUSY && ret != -ERESTARTSYS)
-                               DRM_ERROR("Failure to bind: %d", ret);
+                               DRM_ERROR("Failure to bind: %d\n", ret);
+                       return ret;
+               }
+       }
+       /*
+        * Pre-965 chips need a fence register set up in order to
+        * properly handle tiled surfaces.
+        */
+       if (!IS_I965G(dev) &&
+           obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+           obj_priv->tiling_mode != I915_TILING_NONE) {
+               ret = i915_gem_object_get_fence_reg(obj, true);
+               if (ret != 0) {
+                       if (ret != -EBUSY && ret != -ERESTARTSYS)
+                               DRM_ERROR("Failure to install fence: %d\n",
+                                         ret);
                        return ret;
                }
        }
@@ -2723,6 +2861,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
+               drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
@@ -2803,6 +2942,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                return -EBADF;
        }
 
+       /* Update the active list for the hardware's current position.
+        * Otherwise this only updates on a delayed timer or when irqs are
+        * actually unmasked, and our working set ends up being larger than
+        * required.
+        */
+       i915_gem_retire_requests(dev);
+
        obj_priv = obj->driver_private;
        /* Don't count being on the flushing list against the object being
         * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2855,9 +3001,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_map *map;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
        while (obj_priv->pin_count > 0)
@@ -2868,19 +3011,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
-       list = &obj->map_list;
-       drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-       if (list->file_offset_node) {
-               drm_mm_put_block(list->file_offset_node);
-               list->file_offset_node = NULL;
-       }
-
-       map = list->map;
-       if (map) {
-               drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-               list->map = NULL;
-       }
+       i915_gem_free_mmap_offset(obj);
 
        drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
        drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2919,7 +3050,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
        return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3065,6 +3196,7 @@ i915_gem_init_hws(struct drm_device *dev)
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }
@@ -3077,6 +3209,31 @@ i915_gem_init_hws(struct drm_device *dev)
        return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       if (dev_priv->hws_obj == NULL)
+               return;
+
+       obj = dev_priv->hws_obj;
+       obj_priv = obj->driver_private;
+
+       kunmap(obj_priv->page_list[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->hws_obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+       dev_priv->hw_status_page = NULL;
+
+       /* Write high address into HWS_PGA when disabling. */
+       I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3094,6 +3251,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        obj = drm_gem_object_alloc(dev, 128 * 1024);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
+               i915_gem_cleanup_hws(dev);
                return -ENOMEM;
        }
        obj_priv = obj->driver_private;
@@ -3101,6 +3259,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
+               i915_gem_cleanup_hws(dev);
                return ret;
        }
 
@@ -3118,7 +3277,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+               i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
+               i915_gem_cleanup_hws(dev);
                return -EINVAL;
        }
        ring->ring_obj = obj;
@@ -3198,20 +3359,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        dev_priv->ring.ring_obj = NULL;
        memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-       if (dev_priv->hws_obj != NULL) {
-               struct drm_gem_object *obj = dev_priv->hws_obj;
-               struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-               kunmap(obj_priv->page_list[0]);
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               dev_priv->hws_obj = NULL;
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               dev_priv->hw_status_page = NULL;
-
-               /* Write high address into HWS_PGA when disabling. */
-               I915_WRITE(HWS_PGA, 0x1ffff000);
-       }
+       i915_gem_cleanup_hws(dev);
 }
 
 int
@@ -3229,10 +3377,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                dev_priv->mm.wedged = 0;
        }
 
-       dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
-                                                       dev->agp->agp_info.aper_size
-                                                       * 1024 * 1024);
-
        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.suspended = 0;
 
@@ -3255,7 +3399,6 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3407,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
        ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
 
-       io_mapping_free(dev_priv->mm.gtt_mapping);
        return ret;
 }
 
@@ -3273,6 +3415,9 @@ i915_gem_lastclose(struct drm_device *dev)
 {
        int ret;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3439,7 @@ i915_gem_load(struct drm_device *dev)
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
-       if (IS_I965G(dev))
+       if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
@@ -3470,7 +3615,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-       DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+       DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
        ret = copy_from_user(obj_addr, user_data, args->size);
        if (ret)
                return -EFAULT;