Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  268
1 file changed, 173 insertions, 95 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc0..28b726d07a0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -52,7 +48,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
-static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +563,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         pgoff_t page_offset;
         unsigned long pfn;
         int ret = 0;
+        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
         /* We don't use vmf->pgoff since that has the fake offset */
         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +582,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         /* Need a new fence register? */
         if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-            obj_priv->tiling_mode != I915_TILING_NONE)
-                i915_gem_object_get_fence_reg(obj);
+            obj_priv->tiling_mode != I915_TILING_NONE) {
+                ret = i915_gem_object_get_fence_reg(obj, write);
+                if (ret) {
+                        mutex_unlock(&dev->struct_mutex);
+                        return VM_FAULT_SIGBUS;
+                }
+        }
 
         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                 page_offset;
@@ -601,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         case -EAGAIN:
                 return VM_FAULT_OOM;
         case -EFAULT:
-        case -EBUSY:
-                DRM_ERROR("can't insert pfn?? fault or busy...\n");
                 return VM_FAULT_SIGBUS;
         default:
                 return VM_FAULT_NOPAGE;
@@ -678,6 +678,30 @@ out_free_list:
         return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        struct drm_gem_mm *mm = dev->mm_private;
+        struct drm_map_list *list;
+
+        list = &obj->map_list;
+        drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+        if (list->file_offset_node) {
+                drm_mm_put_block(list->file_offset_node);
+                list->file_offset_node = NULL;
+        }
+
+        if (list->map) {
+                drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+                list->map = NULL;
+        }
+
+        obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -752,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
         if (!obj_priv->mmap_offset) {
                 ret = i915_gem_create_mmap_offset(obj);
-                if (ret)
+                if (ret) {
+                        drm_gem_object_unreference(obj);
+                        mutex_unlock(&dev->struct_mutex);
                         return ret;
+                }
         }
 
         args->offset = obj_priv->mmap_offset;
@@ -1024,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
         drm_i915_private_t *dev_priv = dev->dev_private;
         uint32_t seqno;
 
+        if (!dev_priv->hw_status_page)
+                return;
+
         seqno = i915_get_gem_seqno(dev);
 
         while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1211,7 +1241,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 /**
  * Unbinds an object from the GTT aperture.
  */
-static int
+int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
@@ -1445,21 +1475,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         int regnum = obj_priv->fence_reg;
+        int tile_width;
         uint32_t val;
         uint32_t pitch_val;
 
         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
             (obj_priv->gtt_offset & (obj->size - 1))) {
-                WARN(1, "%s: object not 1M or size aligned\n", __func__);
+                WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
+                     __func__, obj_priv->gtt_offset, obj->size);
                 return;
         }
 
-        if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
-                                                       IS_I945GM(dev) ||
-                                                       IS_G33(dev)))
-                pitch_val = (obj_priv->stride / 128) - 1;
+        if (obj_priv->tiling_mode == I915_TILING_Y &&
+            HAS_128_BYTE_Y_TILING(dev))
+                tile_width = 128;
         else
-                pitch_val = (obj_priv->stride / 512) - 1;
+                tile_width = 512;
+
+        /* Note: pitch better be a power of two tile widths */
+        pitch_val = obj_priv->stride / tile_width;
+        pitch_val = ffs(pitch_val) - 1;
 
         val = obj_priv->gtt_offset;
         if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1518,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 
         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
             (obj_priv->gtt_offset & (obj->size - 1))) {
-                WARN(1, "%s: object not 1M or size aligned\n", __func__);
+                WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+                     __func__, obj_priv->gtt_offset);
                 return;
         }
 
@@ -1503,6 +1539,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
+ * @write: object is about to be written
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1550,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
  * It then sets up the reg based on the object's properties: address, pitch
  * and tiling format.
  */
-static void
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
 {
         struct drm_device *dev = obj->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1564,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
                 WARN(1, "allocating a fence for non-tiled object?\n");
                 break;
         case I915_TILING_X:
-                WARN(obj_priv->stride & (512 - 1),
-                     "object is X tiled but has non-512B pitch\n");
+                if (!obj_priv->stride)
+                        return -EINVAL;
+                WARN((obj_priv->stride & (512 - 1)),
+                     "object 0x%08x is X tiled but has non-512B pitch\n",
+                     obj_priv->gtt_offset);
                 break;
         case I915_TILING_Y:
-                WARN(obj_priv->stride & (128 - 1),
-                     "object is Y tiled but has non-128B pitch\n");
+                if (!obj_priv->stride)
+                        return -EINVAL;
+                WARN((obj_priv->stride & (128 - 1)),
+                     "object 0x%08x is Y tiled but has non-128B pitch\n",
+                     obj_priv->gtt_offset);
                 break;
         }
 
@@ -1563,10 +1606,11 @@ try_again:
          * objects to finish before trying again.
          */
         if (i == dev_priv->num_fence_regs) {
-                ret = i915_gem_object_wait_rendering(reg->obj);
+                ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
                 if (ret) {
-                        WARN(ret, "wait_rendering failed: %d\n", ret);
-                        return;
+                        WARN(ret != -ERESTARTSYS,
+                             "switch to GTT domain failed: %d\n", ret);
+                        return ret;
                 }
                 goto try_again;
         }
@@ -1591,6 +1635,8 @@ try_again:
                 i915_write_fence_reg(reg);
         else
                 i830_write_fence_reg(reg);
+
+        return 0;
 }
 
 /**
@@ -1631,7 +1677,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         if (dev_priv->mm.suspended)
                 return -EBUSY;
         if (alignment == 0)
-                alignment = PAGE_SIZE;
+                alignment = i915_gem_get_gtt_alignment(obj);
         if (alignment & (PAGE_SIZE - 1)) {
                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
                 return -EINVAL;
@@ -1974,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-                                  uint32_t read_domains,
-                                  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         uint32_t invalidate_domains = 0;
         uint32_t flush_domains = 0;
 
-        BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-        BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+        BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+        BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                  __func__, obj,
-                 obj->read_domains, read_domains,
-                 obj->write_domain, write_domain);
+                 obj->read_domains, obj->pending_read_domains,
+                 obj->write_domain, obj->pending_write_domain);
 #endif
         /*
          * If the object isn't moving to a new write domain,
          * let the object stay in multiple read domains
          */
-        if (write_domain == 0)
-                read_domains |= obj->read_domains;
+        if (obj->pending_write_domain == 0)
+                obj->pending_read_domains |= obj->read_domains;
         else
                 obj_priv->dirty = 1;
 
@@ -2007,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
          * any read domains which differ from the old
          * write domain
          */
-        if (obj->write_domain && obj->write_domain != read_domains) {
+        if (obj->write_domain &&
+            obj->write_domain != obj->pending_read_domains) {
                 flush_domains |= obj->write_domain;
-                invalidate_domains |= read_domains & ~obj->write_domain;
+                invalidate_domains |=
+                        obj->pending_read_domains & ~obj->write_domain;
         }
         /*
          * Invalidate any read caches which may have
          * stale data. That is, any new read domains.
          */
-        invalidate_domains |= read_domains & ~obj->read_domains;
+        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2024,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
                 i915_gem_clflush_object(obj);
         }
 
-        if ((write_domain | flush_domains) != 0)
-                obj->write_domain = write_domain;
-        obj->read_domains = read_domains;
+        /* The actual obj->write_domain will be updated with
+         * pending_write_domain after we emit the accumulated flush for all
+         * of our domain changes in execbuffers (which clears objects'
+         * write_domains). So if we have a current write domain that we
+         * aren't changing, set pending_write_domain to that.
+         */
+        if (flush_domains == 0 && obj->pending_write_domain == 0)
+                obj->pending_write_domain = obj->write_domain;
+        obj->read_domains = obj->pending_read_domains;
 
         dev->invalidate_domains |= invalidate_domains;
         dev->flush_domains |= flush_domains;
@@ -2229,6 +2281,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                   (int) reloc.offset,
                                   reloc.read_domains,
                                   reloc.write_domain);
+                        drm_gem_object_unreference(target_obj);
+                        i915_gem_object_unpin(obj);
                         return -EINVAL;
                 }
 
@@ -2458,13 +2512,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         if (dev_priv->mm.wedged) {
                 DRM_ERROR("Execbuf while wedged\n");
                 mutex_unlock(&dev->struct_mutex);
-                return -EIO;
+                ret = -EIO;
+                goto pre_mutex_err;
         }
 
         if (dev_priv->mm.suspended) {
                 DRM_ERROR("Execbuf while VT-switched.\n");
                 mutex_unlock(&dev->struct_mutex);
-                return -EBUSY;
+                ret = -EBUSY;
+                goto pre_mutex_err;
         }
 
         /* Look up object handles */
@@ -2532,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 struct drm_gem_object *obj = object_list[i];
 
                 /* Compute new gpu domains and update invalidate/flush */
-                i915_gem_object_set_to_gpu_domain(obj,
-                                                  obj->pending_read_domains,
-                                                  obj->pending_write_domain);
+                i915_gem_object_set_to_gpu_domain(obj);
         }
 
         i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2553,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 (void)i915_add_request(dev, dev->flush_domains);
         }
 
+        for (i = 0; i < args->buffer_count; i++) {
+                struct drm_gem_object *obj = object_list[i];
+
+                obj->write_domain = obj->pending_write_domain;
+        }
+
         i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2610,15 +2670,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
         i915_verify_inactive(dev, __FILE__, __LINE__);
 
-        /* Copy the new buffer offsets back to the user's exec list. */
-        ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-                           (uintptr_t) args->buffers_ptr,
-                           exec_list,
-                           sizeof(*exec_list) * args->buffer_count);
-        if (ret)
-                DRM_ERROR("failed to copy %d exec entries "
-                          "back to user (%d)\n",
-                          args->buffer_count, ret);
 err:
         for (i = 0; i < pinned; i++)
                 i915_gem_object_unpin(object_list[i]);
@@ -2628,6 +2679,18 @@ err:
 
         mutex_unlock(&dev->struct_mutex);
 
+        if (!ret) {
+                /* Copy the new buffer offsets back to the user's exec list. */
+                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+                                   (uintptr_t) args->buffers_ptr,
+                                   exec_list,
+                                   sizeof(*exec_list) * args->buffer_count);
+                if (ret)
+                        DRM_ERROR("failed to copy %d exec entries "
+                                  "back to user (%d)\n",
+                                  args->buffer_count, ret);
+        }
+
 pre_mutex_err:
         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
                  DRM_MEM_DRIVER);
@@ -2652,6 +2715,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                         DRM_ERROR("Failure to bind: %d", ret);
                         return ret;
                 }
+                /*
+                 * Pre-965 chips need a fence register set up in order to
+                 * properly handle tiled surfaces.
+                 */
+                if (!IS_I965G(dev) &&
+                    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+                    obj_priv->tiling_mode != I915_TILING_NONE)
+                        i915_gem_object_get_fence_reg(obj, true);
         }
         obj_priv->pin_count++;
 
@@ -2723,6 +2794,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                           args->handle);
+                drm_gem_object_unreference(obj);
                 mutex_unlock(&dev->struct_mutex);
                 return -EINVAL;
         }
@@ -2803,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 return -EBADF;
         }
 
+        /* Update the active list for the hardware's current position.
+         * Otherwise this only updates on a delayed timer or when irqs are
+         * actually unmasked, and our working set ends up being larger than
+         * required.
+         */
+        i915_gem_retire_requests(dev);
+
         obj_priv = obj->driver_private;
         /* Don't count being on the flushing list against the object being
          * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2855,9 +2934,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
-        struct drm_gem_mm *mm = dev->mm_private;
-        struct drm_map_list *list;
-        struct drm_map *map;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
         while (obj_priv->pin_count > 0)
@@ -2868,19 +2944,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
         i915_gem_object_unbind(obj);
 
-        list = &obj->map_list;
-        drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-        if (list->file_offset_node) {
-                drm_mm_put_block(list->file_offset_node);
-                list->file_offset_node = NULL;
-        }
-
-        map = list->map;
-        if (map) {
-                drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-                list->map = NULL;
-        }
+        i915_gem_free_mmap_offset(obj);
 
         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2919,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
         return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3065,6 +3129,7 @@ i915_gem_init_hws(struct drm_device *dev)
         if (dev_priv->hw_status_page == NULL) {
                 DRM_ERROR("Failed to map status page.\n");
                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+                i915_gem_object_unpin(obj);
                 drm_gem_object_unreference(obj);
                 return -EINVAL;
         }
@@ -3077,6 +3142,31 @@ i915_gem_init_hws(struct drm_device *dev)
         return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_gem_object *obj;
+        struct drm_i915_gem_object *obj_priv;
+
+        if (dev_priv->hws_obj == NULL)
+                return;
+
+        obj = dev_priv->hws_obj;
+        obj_priv = obj->driver_private;
+
+        kunmap(obj_priv->page_list[0]);
+        i915_gem_object_unpin(obj);
+        drm_gem_object_unreference(obj);
+        dev_priv->hws_obj = NULL;
+
+        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+        dev_priv->hw_status_page = NULL;
+
+        /* Write high address into HWS_PGA when disabling. */
+        I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3094,6 +3184,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         obj = drm_gem_object_alloc(dev, 128 * 1024);
         if (obj == NULL) {
                 DRM_ERROR("Failed to allocate ringbuffer\n");
+                i915_gem_cleanup_hws(dev);
                 return -ENOMEM;
         }
         obj_priv = obj->driver_private;
@@ -3101,6 +3192,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         ret = i915_gem_object_pin(obj, 4096);
         if (ret != 0) {
                 drm_gem_object_unreference(obj);
+                i915_gem_cleanup_hws(dev);
                 return ret;
         }
 
@@ -3118,7 +3210,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
         if (ring->map.handle == NULL) {
                 DRM_ERROR("Failed to map ringbuffer.\n");
                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+                i915_gem_object_unpin(obj);
                 drm_gem_object_unreference(obj);
+                i915_gem_cleanup_hws(dev);
                 return -EINVAL;
         }
         ring->ring_obj = obj;
@@ -3198,20 +3292,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
         dev_priv->ring.ring_obj = NULL;
         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-        if (dev_priv->hws_obj != NULL) {
-                struct drm_gem_object *obj = dev_priv->hws_obj;
-                struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-                kunmap(obj_priv->page_list[0]);
-                i915_gem_object_unpin(obj);
-                drm_gem_object_unreference(obj);
-                dev_priv->hws_obj = NULL;
-                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-                dev_priv->hw_status_page = NULL;
-
-                /* Write high address into HWS_PGA when disabling. */
-                I915_WRITE(HWS_PGA, 0x1ffff000);
-        }
+        i915_gem_cleanup_hws(dev);
 }
 
 int
@@ -3229,10 +3310,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                 dev_priv->mm.wedged = 0;
         }
 
-        dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
-                                                        dev->agp->agp_info.aper_size
-                                                        * 1024 * 1024);
-
         mutex_lock(&dev->struct_mutex);
         dev_priv->mm.suspended = 0;
 
@@ -3255,7 +3332,6 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
         int ret;
 
         if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3340,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
         ret = i915_gem_idle(dev);
         drm_irq_uninstall(dev);
 
-        io_mapping_free(dev_priv->mm.gtt_mapping);
         return ret;
 }
 
@@ -3273,6 +3348,9 @@ i915_gem_lastclose(struct drm_device *dev)
 {
         int ret;
 
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return;
+
         ret = i915_gem_idle(dev);
         if (ret)
                 DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3372,7 @@ i915_gem_load(struct drm_device *dev)
         /* Old X drivers will take 0-2 for front, back, depth buffers */
         dev_priv->fence_reg_start = 3;
 
-        if (IS_I965G(dev))
+        if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                 dev_priv->num_fence_regs = 16;
         else
                 dev_priv->num_fence_regs = 8;