Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 300
 1 file changed, 240 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050..0c67924ca80 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	/* blow away mappings if mapped through GTT */
 	i915_gem_release_mmap(obj);
 
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
-
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
 	 * are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* release the fence reg _after_ flushing */
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);
+
 	if (obj_priv->agp_mem != NULL) {
 		drm_unbind_agp(obj_priv->agp_mem);
 		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	bool retry_alloc = false;
 	int ret;
 
-	if (dev_priv->mm.suspended)
-		return -EBUSY;
-
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to bind a purgeable object\n");
 		return -EINVAL;
@@ -2839,6 +2837,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	return 0;
 }
 
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj_priv->gtt_space == NULL)
+		return -EINVAL;
+
+	i915_gem_object_flush_gpu_write_domain(obj);
+
+	/* Wait on any GPU rendering and flushing to occur. */
+	if (obj_priv->active) {
+#if WATCH_BUF
+		DRM_INFO("%s: object %p wait for seqno %08x\n",
+			 __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		if (ret != 0)
+			return ret;
+	}
+
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
+	obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	obj->write_domain = I915_GEM_DOMAIN_GTT;
+	obj_priv->dirty = 1;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
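The new helper above is only valid on bound objects, so a caller in the modesetting path would pin first. A minimal sketch of such a caller, with hypothetical names (example_pin_fb_for_scanout and the zero alignment are illustrative, not part of this patch; only the pin/unpin and display-plane helpers come from this file):

    /* Hypothetical caller (not in this patch): pin a framebuffer and
     * move it to the GTT domain for scanout. */
    static int example_pin_fb_for_scanout(struct drm_gem_object *obj)
    {
            int ret;

            /* Bind into the GTT first; set_to_display_plane() returns
             * -EINVAL for unbound objects. */
            ret = i915_gem_object_pin(obj, 0 /* illustrative alignment */);
            if (ret)
                    return ret;

            /* Uninterruptible flush/wait, then GTT read/write domains. */
            ret = i915_gem_object_set_to_display_plane(obj);
            if (ret)
                    i915_gem_object_unpin(obj);
            return ret;
    }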
@@ -3198,7 +3247,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 static int
 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_exec_object2 *entry,
 				 struct drm_i915_gem_relocation_entry *relocs)
 {
 	struct drm_device *dev = obj->dev;
@@ -3206,12 +3255,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int i, ret;
 	void __iomem *reloc_page;
+	bool need_fence;
+
+	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		     obj_priv->tiling_mode != I915_TILING_NONE;
+
+	/* Check fence reg constraints and rebind if necessary */
+	if (need_fence && !i915_obj_fenceable(dev, obj))
+		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
 	if (ret)
 		return ret;
 
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle blits to/from tiled surfaces.
+	 */
+	if (need_fence) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}
+
 	entry->offset = obj_priv->gtt_offset;
 
 	/* Apply the relocations, using the GTT aperture to avoid cache
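With this hunk, fence registers are requested per object at execbuffer time, gated on EXEC_OBJECT_NEEDS_FENCE, rather than unconditionally at pin time (the old logic is removed from i915_gem_object_pin() in a later hunk). A userspace-side sketch of filling in an exec_object2 entry; the variable names (bo_handle, num_relocs, relocs, presumed_offset, is_tiled) are illustrative, while the struct and flag come from the i915 uapi:

    struct drm_i915_gem_exec_object2 entry = {
            .handle           = bo_handle,
            .relocation_count = num_relocs,
            .relocs_ptr       = (uintptr_t) relocs,
            .alignment        = 0,
            .offset           = presumed_offset,
            /* Ask the kernel for a fence only when the buffer is tiled;
             * untiled buffers leave flags at 0. */
            .flags            = is_tiled ? EXEC_OBJECT_NEEDS_FENCE : 0,
    };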
@@ -3373,7 +3445,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
  */
 static int
 i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct drm_i915_gem_execbuffer *exec,
+			     struct drm_i915_gem_execbuffer2 *exec,
 			     struct drm_clip_rect *cliprects,
 			     uint64_t exec_offset)
 {
@@ -3463,7 +3535,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
 			      uint32_t buffer_count,
 			      struct drm_i915_gem_relocation_entry **relocs)
 {
@@ -3478,8 +3550,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 	}
 
 	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL)
+	if (*relocs == NULL) {
+		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3577,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
 }
 
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 			    uint32_t buffer_count,
 			    struct drm_i915_gem_relocation_entry *relocs)
 {
@@ -3536,7 +3610,7 @@ err:
 }
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
 			   uint64_t exec_offset)
 {
 	uint32_t exec_start, exec_len;
@@ -3589,18 +3663,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 }
 
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec_list)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
 	struct drm_gem_object **object_list = NULL;
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_relocation_entry *relocs;
-	int ret, ret2, i, pinned = 0;
+	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
@@ -3614,25 +3688,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
 	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (exec_list == NULL || object_list == NULL) {
-		DRM_ERROR("Failed to allocate exec or object list "
-			  "for %d buffers\n",
-			  args->buffer_count);
+	if (object_list == NULL) {
+		DRM_ERROR("Failed to allocate object list for %d buffers\n",
+			  args->buffer_count);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
 	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		goto pre_mutex_err;
-	}
 
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3946,6 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
 	/* Copy the updated relocations out regardless of current error
 	 * state. Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3962,156 @@ err:
 
 pre_mutex_err:
 	drm_free_large(object_list);
-	drm_free_large(exec_list);
 	kfree(cliprects);
 
 	return ret;
 }
 
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (!IS_I965G(dev))
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = 0;
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
+
 int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
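Userspace reaches i915_gem_execbuffer2() through the new execbuffer2 ioctl. A minimal sketch of a caller, assuming the DRM_IOCTL_I915_GEM_EXECBUFFER2 request number is wired up in the uapi header and libdrm's drmIoctl() is available; example_submit and its parameters are illustrative:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>       /* drmIoctl() */
    #include <drm/i915_drm.h>  /* struct drm_i915_gem_execbuffer2 */

    static int example_submit(int fd,
                              struct drm_i915_gem_exec_object2 *exec2_list,
                              uint32_t count, uint32_t batch_len)
    {
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(&execbuf, 0, sizeof(execbuf));
            execbuf.buffers_ptr = (uintptr_t) exec2_list;
            execbuf.buffer_count = count;
            execbuf.batch_len = batch_len;

            /* On success the kernel copies the final GTT offsets back
             * into exec2_list, as i915_gem_execbuffer2() does above. */
            return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }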
@@ -3933,19 +4125,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle tiled surfaces.
-	 */
-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
-			return ret;
-		}
-	}
+
 	obj_priv->pin_count++;
 
 	/* If the object is not active and not pending a flush,
@@ -4708,7 +4888,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
 
 	phys_obj->id = id;
 
-	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+	phys_obj->handle = drm_pci_alloc(dev, size, 0);
 	if (!phys_obj->handle) {
 		ret = -ENOMEM;
 		goto kfree_obj;