author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 00:56:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 00:56:47 -0500
commit		3ef884b4c04e857c283cc77ca70ad8f638d94b0e (patch)
tree		c8c5b872e836e6ffe8bd08ab3477f9e8260575ed /drivers/gpu/drm/i915/i915_gem.c
parent		4e5df8069b0e4e36c6b528b3be7da298e6f454cd (diff)
parent		4361e52ad0372e6fd2240a2207b49a4de1f45ca9 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits)
  drm/radeon/kms: fix warning about cur_placement being uninitialised.
  drm/ttm: Print debug information on memory manager when eviction fails
  drm: Add memory manager debug function
  drm/radeon/kms: restore surface registers on resume.
  drm/radeon/kms/r600/r700: fallback gracefully on ucode failure
  drm/ttm: Initialize eviction placement in case the driver callback doesn't
  drm/radeon/kms: cleanup structure and module if initialization fails
  drm/radeon/kms: actualy set the eviction placements we choose
  drm/radeon/kms: Fix NULL ptr dereference
  drm/radeon/kms/avivo: add support for new pll selection algo
  drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup
  drm/radeon/kms: fix return value from fence function.
  drm/radeon: Remove tests for -ERESTART from the TTM code.
  drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART.
  drm/radeon/kms: Convert radeon to new TTM validation API (V2)
  drm/ttm: Rework validation & memory space allocation (V3)
  drm: Add search/get functions to get a block in a specific range
  drm/radeon/kms: fix avivo tiling regression since radeon object rework
  drm/i915: Remove a debugging printk from hangcheck
  drm/radeon/kms: make sure i2c id matches
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	114
1 files changed, 92 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa59992..8c463cf2050 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 	list->hash.key = list->file_offset_node->start;
 	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  *
  * Returned sequence numbers are nonzero on success.
  */
-static uint32_t
+uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
 {
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
-	DRM_DEBUG("%d\n", seqno);
+	DRM_DEBUG_DRIVER("%d\n", seqno);
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
 		i915_user_irq_get(dev);
-		ret = wait_event_interruptible(dev_priv->irq_queue,
-					       i915_seqno_passed(i915_get_gem_seqno(dev),
-								 seqno) ||
-					       atomic_read(&dev_priv->mm.wedged));
+		if (interruptible)
+			ret = wait_event_interruptible(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+		else
+			wait_event(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 	return ret;
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	return i915_do_wait_request(dev, seqno, 1);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
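
The split above keeps i915_wait_request() as the interruptible path while letting new callers sleep uninterruptibly via i915_do_wait_request(). The primitives involved are the stock kernel ones; a minimal sketch of the pattern follows (the queue and condition names are illustrative, not from the driver):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_queue);	/* hypothetical queue */
static int demo_done;				/* hypothetical condition */

static int demo_wait(int interruptible)
{
	int ret = 0;

	if (interruptible)
		/* wait_event_interruptible() returns -ERESTARTSYS if a
		 * signal arrives before the condition becomes true. */
		ret = wait_event_interruptible(demo_queue, demo_done);
	else
		/* wait_event() sleeps in TASK_UNINTERRUPTIBLE and has no
		 * failure mode, which is why the patch only assigns ret
		 * in the interruptible branch. */
		wait_event(demo_queue, demo_done);

	return ret;
}
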
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
 #endif
 		BEGIN_LP_RING(2);
 		OUT_RING(cmd);
-		OUT_RING(0); /* noop */
+		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
 }
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+	switch (obj->write_domain) {
+	case I915_GEM_DOMAIN_GTT:
+		i915_gem_object_flush_gtt_write_domain(obj);
+		break;
+	case I915_GEM_DOMAIN_CPU:
+		i915_gem_object_flush_cpu_write_domain(obj);
+		break;
+	default:
+		i915_gem_object_flush_gpu_write_domain(obj);
+		break;
+	}
+}
+
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
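
The new i915_gem_object_flush_write_domain() above is deliberately non-static, so code outside i915_gem.c can flush whichever single write domain an object currently holds (GEM keeps at most one write domain per object, which is why a plain switch works). A hypothetical out-of-file caller, not part of this patch, might look like:

/* Hypothetical: force any pending CPU/GTT/GPU writes to 'obj' out to
 * memory before handing the object to a hardware consumer. */
static void demo_prepare_for_hw(struct drm_gem_object *obj)
{
	i915_gem_object_flush_write_domain(obj);
}
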
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
 	return 0;
 }
 
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+			       struct drm_gem_object **object_list,
+			       int count)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	DEFINE_WAIT(wait);
+	int i, ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->pending_flip_queue,
+				&wait, TASK_INTERRUPTIBLE);
+		for (i = 0; i < count; i++) {
+			obj_priv = object_list[i]->driver_private;
+			if (atomic_read(&obj_priv->pending_flip) > 0)
+				break;
+		}
+		if (i == count)
+			break;
+
+		if (!signal_pending(current)) {
+			mutex_unlock(&dev->struct_mutex);
+			schedule();
+			mutex_lock(&dev->struct_mutex);
+			continue;
+		}
+		ret = -ERESTARTSYS;
+		break;
+	}
+	finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+	return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
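
i915_gem_wait_for_pending_flip() above cannot use wait_event() directly because it has to drop dev->struct_mutex while asleep: holding that mutex across the sleep would stall the rest of the driver, including work the flip completion may depend on. It therefore open-codes the classic prepare_to_wait()/finish_wait() loop. A reduced sketch of the same pattern, with a placeholder queue, lock, and condition (all three are assumptions for illustration):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int demo_wait_unlocked(wait_queue_head_t *queue,
			      struct mutex *lock, int *condition)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		/* Register on the queue before testing the condition so
		 * a wakeup between the test and the sleep is not lost. */
		prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
		if (*condition)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Release the lock so the waker can run, then sleep. */
		mutex_unlock(lock);
		schedule();
		mutex_lock(lock);
	}
	finish_wait(queue, &wait);
	return ret;
}
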
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
-	int pin_tries;
+	int pin_tries, flips;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
-		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EIO;
 		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
-		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
+	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 		obj_priv->in_execbuffer = true;
+		flips += atomic_read(&obj_priv->pending_flip);
+	}
+
+	if (flips > 0) {
+		ret = i915_gem_wait_for_pending_flip(dev, object_list,
+						     args->buffer_count);
+		if (ret)
+			goto err;
 	}
 
 	/* Pin and relocate */
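
The hunk above is the consumer half of the pending-flip handshake: execbuffer tallies pending_flip across every referenced object and only enters the wait loop when something is actually outstanding. The producer half lives outside this file; for the wait to terminate, the flip-completion code must decrement the counter and wake the queue, plausibly along these lines (a hedged sketch — the names and placement are assumptions, not code from this patch):

#include <linux/wait.h>
#include <asm/atomic.h>

/* Hypothetical completion hook, run once the hardware reports that a
 * queued page flip has landed. */
static void demo_flip_complete(atomic_t *pending_flip,
			       wait_queue_head_t *flip_queue)
{
	/* Publish the count change before waking, so a waiter that
	 * re-checks the condition sees the decrement. */
	atomic_dec(pending_flip);
	wake_up(flip_queue);
}
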
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	I915_READ(HWS_PGA); /* posting read */
-	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
 }
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
 		for (i = 0; i < 8; i++)
 			I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
 	}
-
 	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
 }
 
 /*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
 	ret = copy_from_user(obj_addr, user_data, args->size);
 	if (ret)
 		return -EFAULT;