about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 113
1 file changed, 91 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abfc27b0c2ea..5b46623d62d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1583,7 +1583,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1583 * 1583 *
1584 * Returned sequence numbers are nonzero on success. 1584 * Returned sequence numbers are nonzero on success.
1585 */ 1585 */
1586static uint32_t 1586uint32_t
1587i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1587i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1588 uint32_t flush_domains) 1588 uint32_t flush_domains)
1589{ 1589{
@@ -1617,7 +1617,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1617 OUT_RING(MI_USER_INTERRUPT); 1617 OUT_RING(MI_USER_INTERRUPT);
1618 ADVANCE_LP_RING(); 1618 ADVANCE_LP_RING();
1619 1619
1620 DRM_DEBUG("%d\n", seqno); 1620 DRM_DEBUG_DRIVER("%d\n", seqno);
1621 1621
1622 request->seqno = seqno; 1622 request->seqno = seqno;
1623 request->emitted_jiffies = jiffies; 1623 request->emitted_jiffies = jiffies;
@@ -1820,12 +1820,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
1820 mutex_unlock(&dev->struct_mutex); 1820 mutex_unlock(&dev->struct_mutex);
1821} 1821}
1822 1822
1823/** 1823int
1824 * Waits for a sequence number to be signaled, and cleans up the 1824i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1825 * request and object lists appropriately for that event.
1826 */
1827static int
1828i915_wait_request(struct drm_device *dev, uint32_t seqno)
1829{ 1825{
1830 drm_i915_private_t *dev_priv = dev->dev_private; 1826 drm_i915_private_t *dev_priv = dev->dev_private;
1831 u32 ier; 1827 u32 ier;
@@ -1837,7 +1833,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1837 return -EIO; 1833 return -EIO;
1838 1834
1839 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1835 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1840 if (IS_IGDNG(dev)) 1836 if (IS_IRONLAKE(dev))
1841 ier = I915_READ(DEIER) | I915_READ(GTIER); 1837 ier = I915_READ(DEIER) | I915_READ(GTIER);
1842 else 1838 else
1843 ier = I915_READ(IER); 1839 ier = I915_READ(IER);
@@ -1852,10 +1848,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1852 1848
1853 dev_priv->mm.waiting_gem_seqno = seqno; 1849 dev_priv->mm.waiting_gem_seqno = seqno;
1854 i915_user_irq_get(dev); 1850 i915_user_irq_get(dev);
1855 ret = wait_event_interruptible(dev_priv->irq_queue, 1851 if (interruptible)
1856 i915_seqno_passed(i915_get_gem_seqno(dev), 1852 ret = wait_event_interruptible(dev_priv->irq_queue,
1857 seqno) || 1853 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1858 atomic_read(&dev_priv->mm.wedged)); 1854 atomic_read(&dev_priv->mm.wedged));
1855 else
1856 wait_event(dev_priv->irq_queue,
1857 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1858 atomic_read(&dev_priv->mm.wedged));
1859
1859 i915_user_irq_put(dev); 1860 i915_user_irq_put(dev);
1860 dev_priv->mm.waiting_gem_seqno = 0; 1861 dev_priv->mm.waiting_gem_seqno = 0;
1861 1862
@@ -1879,6 +1880,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1879 return ret; 1880 return ret;
1880} 1881}
1881 1882
1883/**
1884 * Waits for a sequence number to be signaled, and cleans up the
1885 * request and object lists appropriately for that event.
1886 */
1887static int
1888i915_wait_request(struct drm_device *dev, uint32_t seqno)
1889{
1890 return i915_do_wait_request(dev, seqno, 1);
1891}
1892
1882static void 1893static void
1883i915_gem_flush(struct drm_device *dev, 1894i915_gem_flush(struct drm_device *dev,
1884 uint32_t invalidate_domains, 1895 uint32_t invalidate_domains,
@@ -1947,7 +1958,7 @@ i915_gem_flush(struct drm_device *dev,
1947#endif 1958#endif
1948 BEGIN_LP_RING(2); 1959 BEGIN_LP_RING(2);
1949 OUT_RING(cmd); 1960 OUT_RING(cmd);
1950 OUT_RING(0); /* noop */ 1961 OUT_RING(MI_NOOP);
1951 ADVANCE_LP_RING(); 1962 ADVANCE_LP_RING();
1952 } 1963 }
1953} 1964}
@@ -2760,6 +2771,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2760 old_write_domain); 2771 old_write_domain);
2761} 2772}
2762 2773
2774void
2775i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2776{
2777 switch (obj->write_domain) {
2778 case I915_GEM_DOMAIN_GTT:
2779 i915_gem_object_flush_gtt_write_domain(obj);
2780 break;
2781 case I915_GEM_DOMAIN_CPU:
2782 i915_gem_object_flush_cpu_write_domain(obj);
2783 break;
2784 default:
2785 i915_gem_object_flush_gpu_write_domain(obj);
2786 break;
2787 }
2788}
2789
2763/** 2790/**
2764 * Moves a single object to the GTT read, and possibly write domain. 2791 * Moves a single object to the GTT read, and possibly write domain.
2765 * 2792 *
@@ -3525,6 +3552,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3525 return 0; 3552 return 0;
3526} 3553}
3527 3554
3555static int
3556i915_gem_wait_for_pending_flip(struct drm_device *dev,
3557 struct drm_gem_object **object_list,
3558 int count)
3559{
3560 drm_i915_private_t *dev_priv = dev->dev_private;
3561 struct drm_i915_gem_object *obj_priv;
3562 DEFINE_WAIT(wait);
3563 int i, ret = 0;
3564
3565 for (;;) {
3566 prepare_to_wait(&dev_priv->pending_flip_queue,
3567 &wait, TASK_INTERRUPTIBLE);
3568 for (i = 0; i < count; i++) {
3569 obj_priv = object_list[i]->driver_private;
3570 if (atomic_read(&obj_priv->pending_flip) > 0)
3571 break;
3572 }
3573 if (i == count)
3574 break;
3575
3576 if (!signal_pending(current)) {
3577 mutex_unlock(&dev->struct_mutex);
3578 schedule();
3579 mutex_lock(&dev->struct_mutex);
3580 continue;
3581 }
3582 ret = -ERESTARTSYS;
3583 break;
3584 }
3585 finish_wait(&dev_priv->pending_flip_queue, &wait);
3586
3587 return ret;
3588}
3589
3528int 3590int
3529i915_gem_execbuffer(struct drm_device *dev, void *data, 3591i915_gem_execbuffer(struct drm_device *dev, void *data,
3530 struct drm_file *file_priv) 3592 struct drm_file *file_priv)
@@ -3540,7 +3602,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3540 int ret, ret2, i, pinned = 0; 3602 int ret, ret2, i, pinned = 0;
3541 uint64_t exec_offset; 3603 uint64_t exec_offset;
3542 uint32_t seqno, flush_domains, reloc_index; 3604 uint32_t seqno, flush_domains, reloc_index;
3543 int pin_tries; 3605 int pin_tries, flips;
3544 3606
3545#if WATCH_EXEC 3607#if WATCH_EXEC
3546 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3608 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3614,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3552 return -EINVAL; 3614 return -EINVAL;
3553 } 3615 }
3554 /* Copy in the exec list from userland */ 3616 /* Copy in the exec list from userland */
3555 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); 3617 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3556 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); 3618 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3557 if (exec_list == NULL || object_list == NULL) { 3619 if (exec_list == NULL || object_list == NULL) {
3558 DRM_ERROR("Failed to allocate exec or object list " 3620 DRM_ERROR("Failed to allocate exec or object list "
3559 "for %d buffers\n", 3621 "for %d buffers\n",
@@ -3598,20 +3660,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3598 i915_verify_inactive(dev, __FILE__, __LINE__); 3660 i915_verify_inactive(dev, __FILE__, __LINE__);
3599 3661
3600 if (atomic_read(&dev_priv->mm.wedged)) { 3662 if (atomic_read(&dev_priv->mm.wedged)) {
3601 DRM_ERROR("Execbuf while wedged\n");
3602 mutex_unlock(&dev->struct_mutex); 3663 mutex_unlock(&dev->struct_mutex);
3603 ret = -EIO; 3664 ret = -EIO;
3604 goto pre_mutex_err; 3665 goto pre_mutex_err;
3605 } 3666 }
3606 3667
3607 if (dev_priv->mm.suspended) { 3668 if (dev_priv->mm.suspended) {
3608 DRM_ERROR("Execbuf while VT-switched.\n");
3609 mutex_unlock(&dev->struct_mutex); 3669 mutex_unlock(&dev->struct_mutex);
3610 ret = -EBUSY; 3670 ret = -EBUSY;
3611 goto pre_mutex_err; 3671 goto pre_mutex_err;
3612 } 3672 }
3613 3673
3614 /* Look up object handles */ 3674 /* Look up object handles */
3675 flips = 0;
3615 for (i = 0; i < args->buffer_count; i++) { 3676 for (i = 0; i < args->buffer_count; i++) {
3616 object_list[i] = drm_gem_object_lookup(dev, file_priv, 3677 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3617 exec_list[i].handle); 3678 exec_list[i].handle);
@@ -3630,6 +3691,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3630 goto err; 3691 goto err;
3631 } 3692 }
3632 obj_priv->in_execbuffer = true; 3693 obj_priv->in_execbuffer = true;
3694 flips += atomic_read(&obj_priv->pending_flip);
3695 }
3696
3697 if (flips > 0) {
3698 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3699 args->buffer_count);
3700 if (ret)
3701 goto err;
3633 } 3702 }
3634 3703
3635 /* Pin and relocate */ 3704 /* Pin and relocate */
@@ -4356,7 +4425,7 @@ i915_gem_init_hws(struct drm_device *dev)
4356 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4425 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4357 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4426 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4358 I915_READ(HWS_PGA); /* posting read */ 4427 I915_READ(HWS_PGA); /* posting read */
4359 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4428 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4360 4429
4361 return 0; 4430 return 0;
4362} 4431}
@@ -4614,8 +4683,8 @@ i915_gem_load(struct drm_device *dev)
4614 for (i = 0; i < 8; i++) 4683 for (i = 0; i < 8; i++)
4615 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); 4684 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4616 } 4685 }
4617
4618 i915_gem_detect_bit_6_swizzle(dev); 4686 i915_gem_detect_bit_6_swizzle(dev);
4687 init_waitqueue_head(&dev_priv->pending_flip_queue);
4619} 4688}
4620 4689
4621/* 4690/*
@@ -4790,7 +4859,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4790 user_data = (char __user *) (uintptr_t) args->data_ptr; 4859 user_data = (char __user *) (uintptr_t) args->data_ptr;
4791 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; 4860 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4792 4861
4793 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); 4862 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4794 ret = copy_from_user(obj_addr, user_data, args->size); 4863 ret = copy_from_user(obj_addr, user_data, args->size);
4795 if (ret) 4864 if (ret)
4796 return -EFAULT; 4865 return -EFAULT;