Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 118
 1 file changed, 72 insertions, 46 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..ef188e391406 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		alignment = i915_gem_get_gtt_alignment(obj);
 	if (obj_priv->gtt_offset & (alignment - 1)) {
 		WARN(obj_priv->pin_count,
-		     "bo is already pinned with incorrect alignment:"
-		     " offset=%x, req.alignment=%x\n",
+		     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 		     obj_priv->gtt_offset, alignment);
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }