Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	265
1 file changed, 170 insertions(+), 95 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc0..25b337438ca7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -52,7 +48,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
-static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +563,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         pgoff_t page_offset;
         unsigned long pfn;
         int ret = 0;
+        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
         /* We don't use vmf->pgoff since that has the fake offset */
         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +582,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         /* Need a new fence register? */
         if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-            obj_priv->tiling_mode != I915_TILING_NONE)
-                i915_gem_object_get_fence_reg(obj);
+            obj_priv->tiling_mode != I915_TILING_NONE) {
+                ret = i915_gem_object_get_fence_reg(obj, write);
+                if (ret) {
+                        mutex_unlock(&dev->struct_mutex);
+                        return VM_FAULT_SIGBUS;
+                }
+        }
 
         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                 page_offset;
@@ -601,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         case -EAGAIN:
                 return VM_FAULT_OOM;
         case -EFAULT:
-        case -EBUSY:
-                DRM_ERROR("can't insert pfn?? fault or busy...\n");
                 return VM_FAULT_SIGBUS;
         default:
                 return VM_FAULT_NOPAGE;
@@ -678,6 +678,30 @@ out_free_list:
         return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        struct drm_gem_mm *mm = dev->mm_private;
+        struct drm_map_list *list;
+
+        list = &obj->map_list;
+        drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+        if (list->file_offset_node) {
+                drm_mm_put_block(list->file_offset_node);
+                list->file_offset_node = NULL;
+        }
+
+        if (list->map) {
+                drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+                list->map = NULL;
+        }
+
+        obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -752,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
         if (!obj_priv->mmap_offset) {
                 ret = i915_gem_create_mmap_offset(obj);
-                if (ret)
+                if (ret) {
+                        drm_gem_object_unreference(obj);
+                        mutex_unlock(&dev->struct_mutex);
                         return ret;
+                }
         }
 
         args->offset = obj_priv->mmap_offset;
@@ -1211,7 +1238,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 /**
  * Unbinds an object from the GTT aperture.
  */
-static int
+int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
@@ -1445,21 +1472,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         int regnum = obj_priv->fence_reg;
+        int tile_width;
         uint32_t val;
         uint32_t pitch_val;
 
         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
             (obj_priv->gtt_offset & (obj->size - 1))) {
-                WARN(1, "%s: object not 1M or size aligned\n", __func__);
+                WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
+                     __func__, obj_priv->gtt_offset, obj->size);
                 return;
         }
 
-        if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
-                                                       IS_I945GM(dev) ||
-                                                       IS_G33(dev)))
-                pitch_val = (obj_priv->stride / 128) - 1;
+        if (obj_priv->tiling_mode == I915_TILING_Y &&
+            HAS_128_BYTE_Y_TILING(dev))
+                tile_width = 128;
         else
-                pitch_val = (obj_priv->stride / 512) - 1;
+                tile_width = 512;
+
+        /* Note: pitch better be a power of two tile widths */
+        pitch_val = obj_priv->stride / tile_width;
+        pitch_val = ffs(pitch_val) - 1;
 
         val = obj_priv->gtt_offset;
         if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1515,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 
         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
             (obj_priv->gtt_offset & (obj->size - 1))) {
-                WARN(1, "%s: object not 1M or size aligned\n", __func__);
+                WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+                     __func__, obj_priv->gtt_offset);
                 return;
         }
 
@@ -1503,6 +1536,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
+ * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1547,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static void
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
 {
         struct drm_device *dev = obj->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1561,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
                 WARN(1, "allocating a fence for non-tiled object?\n");
                 break;
         case I915_TILING_X:
-                WARN(obj_priv->stride & (512 - 1),
-                     "object is X tiled but has non-512B pitch\n");
+                if (!obj_priv->stride)
+                        return -EINVAL;
+                WARN((obj_priv->stride & (512 - 1)),
+                     "object 0x%08x is X tiled but has non-512B pitch\n",
+                     obj_priv->gtt_offset);
                 break;
         case I915_TILING_Y:
-                WARN(obj_priv->stride & (128 - 1),
-                     "object is Y tiled but has non-128B pitch\n");
+                if (!obj_priv->stride)
+                        return -EINVAL;
+                WARN((obj_priv->stride & (128 - 1)),
+                     "object 0x%08x is Y tiled but has non-128B pitch\n",
+                     obj_priv->gtt_offset);
                 break;
         }
 
@@ -1563,10 +1603,11 @@ try_again:
          * objects to finish before trying again.
          */
         if (i == dev_priv->num_fence_regs) {
-                ret = i915_gem_object_wait_rendering(reg->obj);
+                ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
                 if (ret) {
-                        WARN(ret, "wait_rendering failed: %d\n", ret);
-                        return;
+                        WARN(ret != -ERESTARTSYS,
+                             "switch to GTT domain failed: %d\n", ret);
+                        return ret;
                 }
                 goto try_again;
         }
@@ -1591,6 +1632,8 @@ try_again:
                 i915_write_fence_reg(reg);
         else
                 i830_write_fence_reg(reg);
+
+        return 0;
 }
 
 /**
@@ -1631,7 +1674,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         if (dev_priv->mm.suspended)
                 return -EBUSY;
         if (alignment == 0)
-                alignment = PAGE_SIZE;
+                alignment = i915_gem_get_gtt_alignment(obj);
         if (alignment & (PAGE_SIZE - 1)) {
                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                 return -EINVAL;
@@ -1974,30 +2017,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         uint32_t invalidate_domains = 0;
         uint32_t flush_domains = 0;
 
-        BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-        BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+        BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+        BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                  __func__, obj,
-                 obj->read_domains, read_domains,
-                 obj->write_domain, write_domain);
+                 obj->read_domains, obj->pending_read_domains,
+                 obj->write_domain, obj->pending_write_domain);
 #endif
         /*
          * If the object isn't moving to a new write domain,
          * let the object stay in multiple read domains
          */
-        if (write_domain == 0)
-                read_domains |= obj->read_domains;
+        if (obj->pending_write_domain == 0)
+                obj->pending_read_domains |= obj->read_domains;
         else
                 obj_priv->dirty = 1;
 
@@ -2007,15 +2048,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
          * any read domains which differ from the old
          * write domain
          */
-        if (obj->write_domain && obj->write_domain != read_domains) {
+        if (obj->write_domain &&
+            obj->write_domain != obj->pending_read_domains) {
                 flush_domains |= obj->write_domain;
-                invalidate_domains |= read_domains & ~obj->write_domain;
+                invalidate_domains |=
+                        obj->pending_read_domains & ~obj->write_domain;
         }
         /*
          * Invalidate any read caches which may have
          * stale data. That is, any new read domains.
          */
-        invalidate_domains |= read_domains & ~obj->read_domains;
+        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2024,9 +2067,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                 i915_gem_clflush_object(obj);
         }
 
-        if ((write_domain | flush_domains) != 0)
-                obj->write_domain = write_domain;
-        obj->read_domains = read_domains;
+        /* The actual obj->write_domain will be updated with
+         * pending_write_domain after we emit the accumulated flush for all
+         * of our domain changes in execbuffers (which clears objects'
+         * write_domains). So if we have a current write domain that we
+         * aren't changing, set pending_write_domain to that.
+         */
+        if (flush_domains == 0 && obj->pending_write_domain == 0)
+                obj->pending_write_domain = obj->write_domain;
+        obj->read_domains = obj->pending_read_domains;
 
         dev->invalidate_domains |= invalidate_domains;
         dev->flush_domains |= flush_domains;
@@ -2229,6 +2278,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                   (int) reloc.offset,
                                   reloc.read_domains,
                                   reloc.write_domain);
+                        drm_gem_object_unreference(target_obj);
+                        i915_gem_object_unpin(obj);
                         return -EINVAL;
                 }
 
@@ -2458,13 +2509,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         if (dev_priv->mm.wedged) {
                 DRM_ERROR("Execbuf while wedged\n");
                 mutex_unlock(&dev->struct_mutex);
-                return -EIO;
+                ret = -EIO;
+                goto pre_mutex_err;
         }
 
         if (dev_priv->mm.suspended) {
                 DRM_ERROR("Execbuf while VT-switched.\n");
                 mutex_unlock(&dev->struct_mutex);
-                return -EBUSY;
+                ret = -EBUSY;
+                goto pre_mutex_err;
         }
 
         /* Look up object handles */
@@ -2532,9 +2585,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 struct drm_gem_object *obj = object_list[i];
 
                 /* Compute new gpu domains and update invalidate/flush */
-                i915_gem_object_set_to_gpu_domain(obj,
-                                                  obj->pending_read_domains,
-                                                  obj->pending_write_domain);
+                i915_gem_object_set_to_gpu_domain(obj);
         }
 
         i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2553,6 +2604,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 (void)i915_add_request(dev, dev->flush_domains);
         }
 
+        for (i = 0; i < args->buffer_count; i++) {
+                struct drm_gem_object *obj = object_list[i];
+
+                obj->write_domain = obj->pending_write_domain;
+        }
+
         i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2610,15 +2667,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
         i915_verify_inactive(dev, __FILE__, __LINE__);
 
-        /* Copy the new buffer offsets back to the user's exec list. */
-        ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-                           (uintptr_t) args->buffers_ptr,
-                           exec_list,
-                           sizeof(*exec_list) * args->buffer_count);
-        if (ret)
-                DRM_ERROR("failed to copy %d exec entries "
-                          "back to user (%d)\n",
-                          args->buffer_count, ret);
 err:
         for (i = 0; i < pinned; i++)
                 i915_gem_object_unpin(object_list[i]);
@@ -2628,6 +2676,18 @@ err:
 
         mutex_unlock(&dev->struct_mutex);
 
+        if (!ret) {
+                /* Copy the new buffer offsets back to the user's exec list. */
+                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                   (uintptr_t) args->buffers_ptr,
+                                   exec_list,
+                                   sizeof(*exec_list) * args->buffer_count);
+                if (ret)
+                        DRM_ERROR("failed to copy %d exec entries "
+                                  "back to user (%d)\n",
+                                  args->buffer_count, ret);
+        }
+
 pre_mutex_err:
         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
                  DRM_MEM_DRIVER);
@@ -2652,6 +2712,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                         DRM_ERROR("Failure to bind: %d", ret);
                         return ret;
                 }
+                /*
+                 * Pre-965 chips need a fence register set up in order to
+                 * properly handle tiled surfaces.
+                 */
+                if (!IS_I965G(dev) &&
+                    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+                    obj_priv->tiling_mode != I915_TILING_NONE)
+                        i915_gem_object_get_fence_reg(obj, true);
         }
         obj_priv->pin_count++;
 
@@ -2723,6 +2791,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                           args->handle);
+                drm_gem_object_unreference(obj);
                 mutex_unlock(&dev->struct_mutex);
                 return -EINVAL;
         }
@@ -2803,6 +2872,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 return -EBADF;
         }
 
+        /* Update the active list for the hardware's current position.
+         * Otherwise this only updates on a delayed timer or when irqs are
+         * actually unmasked, and our working set ends up being larger than
+         * required.
+         */
+        i915_gem_retire_requests(dev);
+
         obj_priv = obj->driver_private;
         /* Don't count being on the flushing list against the object being
          * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2855,9 +2931,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
-        struct drm_gem_mm *mm = dev->mm_private;
-        struct drm_map_list *list;
-        struct drm_map *map;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
         while (obj_priv->pin_count > 0)
@@ -2868,19 +2941,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
         i915_gem_object_unbind(obj);
 
-        list = &obj->map_list;
-        drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-        if (list->file_offset_node) {
-                drm_mm_put_block(list->file_offset_node);
-                list->file_offset_node = NULL;
-        }
-
-        map = list->map;
-        if (map) {
-                drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-                list->map = NULL;
-        }
+        i915_gem_free_mmap_offset(obj);
 
         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2919,7 +2980,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
         return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3065,6 +3126,7 @@ i915_gem_init_hws(struct drm_device *dev)
         if (dev_priv->hw_status_page == NULL) {
                 DRM_ERROR("Failed to map status page.\n");
                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+                i915_gem_object_unpin(obj);
                 drm_gem_object_unreference(obj);
                 return -EINVAL;
         }
@@ -3077,6 +3139,31 @@ i915_gem_init_hws(struct drm_device *dev)
         return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_gem_object *obj;
+        struct drm_i915_gem_object *obj_priv;
+
+        if (dev_priv->hws_obj == NULL)
+                return;
+
+        obj = dev_priv->hws_obj;
+        obj_priv = obj->driver_private;
+
+        kunmap(obj_priv->page_list[0]);
+        i915_gem_object_unpin(obj);
+        drm_gem_object_unreference(obj);
+        dev_priv->hws_obj = NULL;
+
+        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+        dev_priv->hw_status_page = NULL;
+
+        /* Write high address into HWS_PGA when disabling. */
+        I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3094,6 +3181,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         obj = drm_gem_object_alloc(dev, 128 * 1024);
         if (obj == NULL) {
                 DRM_ERROR("Failed to allocate ringbuffer\n");
+                i915_gem_cleanup_hws(dev);
                 return -ENOMEM;
         }
         obj_priv = obj->driver_private;
@@ -3101,6 +3189,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         ret = i915_gem_object_pin(obj, 4096);
         if (ret != 0) {
                 drm_gem_object_unreference(obj);
+                i915_gem_cleanup_hws(dev);
                 return ret;
         }
 
@@ -3118,7 +3207,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         if (ring->map.handle == NULL) {
                 DRM_ERROR("Failed to map ringbuffer.\n");
                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+                i915_gem_object_unpin(obj);
                 drm_gem_object_unreference(obj);
+                i915_gem_cleanup_hws(dev);
                 return -EINVAL;
         }
         ring->ring_obj = obj;
@@ -3198,20 +3289,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
         dev_priv->ring.ring_obj = NULL;
         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-        if (dev_priv->hws_obj != NULL) {
-                struct drm_gem_object *obj = dev_priv->hws_obj;
-                struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-                kunmap(obj_priv->page_list[0]);
-                i915_gem_object_unpin(obj);
-                drm_gem_object_unreference(obj);
-                dev_priv->hws_obj = NULL;
-                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-                dev_priv->hw_status_page = NULL;
-
-                /* Write high address into HWS_PGA when disabling. */
-                I915_WRITE(HWS_PGA, 0x1ffff000);
-        }
+        i915_gem_cleanup_hws(dev);
 }
 
 int
@@ -3229,10 +3307,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                 dev_priv->mm.wedged = 0;
         }
 
-        dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
-                                                        dev->agp->agp_info.aper_size
-                                                        * 1024 * 1024);
-
         mutex_lock(&dev->struct_mutex);
         dev_priv->mm.suspended = 0;
 
@@ -3255,7 +3329,6 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
         int ret;
 
         if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3337,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
         ret = i915_gem_idle(dev);
         drm_irq_uninstall(dev);
 
-        io_mapping_free(dev_priv->mm.gtt_mapping);
         return ret;
 }
 
3270 3342
@@ -3273,6 +3345,9 @@ i915_gem_lastclose(struct drm_device *dev)
3273{ 3345{
3274 int ret; 3346 int ret;
3275 3347
3348 if (drm_core_check_feature(dev, DRIVER_MODESET))
3349 return;
3350
3276 ret = i915_gem_idle(dev); 3351 ret = i915_gem_idle(dev);
3277 if (ret) 3352 if (ret)
3278 DRM_ERROR("failed to idle hardware: %d\n", ret); 3353 DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3369,7 @@ i915_gem_load(struct drm_device *dev)
         /* Old X drivers will take 0-2 for front, back, depth buffers */
         dev_priv->fence_reg_start = 3;
 
-        if (IS_I965G(dev))
+        if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                 dev_priv->num_fence_regs = 16;
         else
                 dev_priv->num_fence_regs = 8;