 drivers/gpu/drm/i915/i915_dma.c        |   7
 drivers/gpu/drm/i915/i915_drv.h        |   5
 drivers/gpu/drm/i915/i915_gem.c        | 239
 drivers/gpu/drm/i915/i915_gem_tiling.c |  46
 include/drm/i915_drm.h                 |  54
 5 files changed, 273 insertions(+), 78 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e3e5d5094232..d67be655c532 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PAGEFLIPPING:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = dev_priv->has_gem;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
 		return -EINVAL;
 	}
 
@@ -1646,6 +1650,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
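
The two hunks above expose the feature to userspace twice over: a getparam for discovery and an ioctl table entry for dispatch. As a minimal sketch (not part of this patch; it assumes a userspace build against the updated i915_drm.h and an already-open DRM fd), a client could probe for execbuf2 support like this:

/* Hypothetical userspace probe, not part of this patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* include path depends on the libdrm install */

static int i915_has_execbuf2(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_EXECBUF2;
	gp.value = &value;

	/* Kernels without this patch report -EINVAL for unknown params,
	 * so a failed ioctl simply means "no execbuf2". */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;
	return value;
}
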
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9a05f1a01025..7eb4ad51034d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -815,6 +815,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -881,6 +883,9 @@ void i915_gem_shrinker_exit(void);
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+		    int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e81a0ddafac..0330c3aa8032 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3199,7 +3199,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_exec_object2 *entry,
 				 struct drm_i915_gem_relocation_entry *relocs)
 {
 	struct drm_device *dev = obj->dev;
@@ -3207,12 +3207,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	void __iomem *reloc_page;
+	bool need_fence;
+
+	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		     obj_priv->tiling_mode != I915_TILING_NONE;
+
+	/* Check fence reg constraints and rebind if necessary */
+	if (need_fence && !i915_obj_fenceable(dev, obj))
+		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
 	if (ret)
 		return ret;
 
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle blits to/from tiled surfaces.
+	 */
+	if (need_fence) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}
+
 	entry->offset = obj_priv->gtt_offset;
 
 	/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3374,7 +3397,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  */
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct drm_i915_gem_execbuffer *exec,
+			     struct drm_i915_gem_execbuffer2 *exec,
 			     struct drm_clip_rect *cliprects,
 			     uint64_t exec_offset)
 {
@@ -3464,7 +3487,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
 			      uint32_t buffer_count,
 			      struct drm_i915_gem_relocation_entry **relocs)
 {
@@ -3479,8 +3502,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 	}
 
 	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL)
+	if (*relocs == NULL) {
+		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3504,7 +3529,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 }
 
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 			    uint32_t buffer_count,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
@@ -3537,7 +3562,7 @@ err:
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
 			   uint64_t exec_offset)
 {
 	uint32_t exec_start, exec_len;
@@ -3590,18 +3615,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 }
 
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec_list)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
 	struct drm_gem_object **object_list = NULL;
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_relocation_entry *relocs;
-	int ret, ret2, i, pinned = 0;
+	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
@@ -3615,25 +3640,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
 	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (exec_list == NULL || object_list == NULL) {
-		DRM_ERROR("Failed to allocate exec or object list "
-			  "for %d buffers\n",
+	if (object_list == NULL) {
+		DRM_ERROR("Failed to allocate object list for %d buffers\n",
 			  args->buffer_count);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
 	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		goto pre_mutex_err;
-	}
 
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3885,20 +3898,6 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
 	/* Copy the updated relocations out regardless of current error
 	 * state. Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3915,12 +3914,158 @@ err:
 
 pre_mutex_err:
 	drm_free_large(object_list);
-	drm_free_large(exec_list);
 	kfree(cliprects);
 
 	return ret;
 }
 
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (!IS_I965G(dev))
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = 0;
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	} else {
+		DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
+
 int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
@@ -3934,19 +4079,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle tiled surfaces.
-	 */
-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
-			return ret;
-		}
-	}
+
 	obj_priv->pin_count++;
 
 	/* If the object is not active and not pending a flush,
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 
 
 /**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable. If not, it may need
+ * to be unbound and have its pitch adjusted.
  */
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
 {
-	int i;
-	int start;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	if (IS_I965G(dev)) {
 		/* The 965 can have fences at any page boundary. */
-		return ALIGN(size, 4096);
+		if (obj->size & 4095)
+			return false;
+		return true;
+	} else if (IS_I9XX(dev)) {
+		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+			return false;
 	} else {
-		/* Align the size to a power of two greater than the smallest
-		 * fence size.
-		 */
-		if (IS_I9XX(dev))
-			start = 1024 * 1024;
-		else
-			start = 512 * 1024;
+		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+			return false;
+	}
 
-		for (i = start; i < size; i <<= 1)
-			;
+	/* Power of two sized... */
+	if (obj->size & (obj->size - 1))
+		return false;
 
-		return i;
-	}
+	/* Objects must be size aligned as well */
+	if (obj_priv->gtt_offset & (obj->size - 1))
+		return false;
+	return true;
 }
 
 /* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
 	int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (stride & (stride - 1))
 		return false;
 
-	/* We don't handle the aperture area covered by the fence being bigger
-	 * than the object size.
-	 */
-	if (i915_get_fence_size(dev, size) != size)
-		return false;
-
 	return true;
 }
 
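The new i915_obj_fenceable() above replaces the old fence-size computation with direct constraint checks: on pre-965 parts a fence wants a power-of-two object size, a size-aligned GTT offset, and an offset inside the fenceable range. A standalone illustration of the two bit tricks it uses (not from the patch; the helper names are invented for the example):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_power_of_two(uint64_t size)
{
	/* A power of two has exactly one bit set, so clearing the lowest
	 * set bit via size & (size - 1) must leave zero. */
	return size != 0 && (size & (size - 1)) == 0;
}

static bool fence_placement_ok(uint64_t gtt_offset, uint64_t size)
{
	/* The offset must be a multiple of the (power-of-two) size. */
	return is_power_of_two(size) && (gtt_offset & (size - 1)) == 0;
}

int main(void)
{
	assert(fence_placement_ok(1 << 20, 1 << 20));	/* 1 MiB at 1 MiB */
	assert(!fence_placement_ok(1 << 20, 3 << 12));	/* 12 KiB is not POT */
	return 0;
}
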
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index ec3f5e80a5df..b64a8d7cdf6d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_MADVISE	0x26
 #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
 #define DRM_I915_OVERLAY_ATTRS	0x28
+#define DRM_I915_GEM_EXECBUFFER2	0x29
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_NUM_FENCES_AVAIL      6
 #define I915_PARAM_HAS_OVERLAY           7
 #define I915_PARAM_HAS_PAGEFLIPPING	 8
+#define I915_PARAM_HAS_EXECBUF2          9
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer {
 	__u64 cliprects_ptr;
 };
 
+struct drm_i915_gem_exec_object2 {
+	/**
+	 * User's handle for a buffer to be bound into the GTT for this
+	 * operation.
+	 */
+	__u32 handle;
+
+	/** Number of relocations to be performed on this buffer */
+	__u32 relocation_count;
+	/**
+	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
+	 * the relocations to be performed in this buffer.
+	 */
+	__u64 relocs_ptr;
+
+	/** Required alignment in graphics aperture */
+	__u64 alignment;
+
+	/**
+	 * Returned value of the updated offset of the object, for future
+	 * presumed_offset writes.
+	 */
+	__u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+	__u64 flags;
+	__u64 rsvd1;
+	__u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+	/**
+	 * List of gem_exec_object2 structs
+	 */
+	__u64 buffers_ptr;
+	__u32 buffer_count;
+
+	/** Offset in the batchbuffer to start execution from. */
+	__u32 batch_start_offset;
+	/** Bytes used in batchbuffer from batch_start_offset */
+	__u32 batch_len;
+	__u32 DR1;
+	__u32 DR4;
+	__u32 num_cliprects;
+	/** This is a struct drm_clip_rect *cliprects */
+	__u64 cliprects_ptr;
+	__u64 flags; /* currently unused */
+	__u64 rsvd1;
+	__u64 rsvd2;
+};
+
 struct drm_i915_gem_pin {
 	/** Handle of the buffer to be pinned. */
 	__u32 handle;
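
With the structs and defines above in place, a submission through the new ioctl looks roughly like the following sketch (not part of the patch: the fd, GEM handle, and batch contents are assumed, and relocations and error handling are omitted). Note that EXEC_OBJECT_NEEDS_FENCE is now a per-object request from userspace rather than an implicit consequence of tiling at pin time:

/* Hypothetical userspace sketch, not part of this patch. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* include path depends on the libdrm install */

static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len,
			int needs_fence)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;
	if (needs_fence)	/* e.g. a tiled buffer on pre-965 hardware */
		obj.flags = EXEC_OBJECT_NEEDS_FENCE;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;	/* array of exec_object2 */
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}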