author		Dave Airlie <airlied@redhat.com>	2010-10-05 20:11:56 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-10-05 20:11:56 -0400
commit		9a170caed6fce89da77852575a7eee7dbadee332 (patch)
tree		489082522869cb382a2dc464ccbd474846693a37 /drivers/gpu/drm/i915/i915_gem.c
parent		45ff46c54a31bf8924b61e3e3411654410a3b5c3 (diff)
parent		7b4f3990a22fbe800945f12001bc30db374d0af5 (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (266 commits)
drm/i915: Avoid circular locking from intel_fbdev_fini()
drm/i915: mark display port DPMS state as 'ON' when enabling output
drm/i915: Skip pread/pwrite if size to copy is 0.
drm/i915: avoid struct mutex output_poll mutex lock loop on unload
drm/i915: Rephrase pwrite bounds checking to avoid any potential overflow
drm/i915: Sanity check pread/pwrite
drm/i915: Use pipe state to tell when pipe is off
drm/i915: vblank status not valid while training display port
drivers/gpu/drm/i915/i915_gem.c: Add missing error handling code
drm/i915: Don't mask the return code whilst relocating.
drm/i915: If the GPU hangs twice within 5 seconds, declare it wedged.
drm/i915: Only print 'generating error event' if we actually are
drm/i915: Try to reset gen2 devices.
drm/i915: Clear fence registers on GPU reset
drm/i915: Force the domain to CPU on unbinding whilst wedged.
drm: Move the GTT accounting to i915
drm/i915: Fix refleak during eviction.
i915: Added function to initialize VBT settings
drm/i915: Remove redundant deletion of obj->gpu_write_list
drm/i915: Make get/put pages static
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	1156
1 files changed, 648 insertions, 508 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4ffbee1c00..29e97c075421 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
 #include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+						  bool pipelined);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+					  bool interruptible);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 				       unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 			    struct drm_file *file_priv);
 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.object_count++;
+	dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.object_count--;
+	dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.gtt_count++;
+	dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.gtt_count--;
+	dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.pin_count++;
+	dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.pin_count--;
+	dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct completion *x = &dev_priv->error_completion;
+	unsigned long flags;
+	int ret;
+
+	if (!atomic_read(&dev_priv->mm.wedged))
+		return 0;
+
+	ret = wait_for_completion_interruptible(x);
+	if (ret)
+		return ret;
+
+	/* Success, we reset the GPU! */
+	if (!atomic_read(&dev_priv->mm.wedged))
+		return 0;
+
+	/* GPU is hung, bump the completion count to account for
+	 * the token we just consumed so that we never hit zero and
+	 * end up waiting upon a subsequent completion event that
+	 * will never happen.
+	 */
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done++;
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+	return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_gem_check_is_wedged(dev);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EAGAIN;
+	}
+
+	WARN_ON(i915_verify_lists(dev));
+	return 0;
+}
+
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
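
A note on the i915_gem_check_is_wedged() helper added above: each waiter consumes one token from error_completion, and if the GPU turns out to still be wedged the helper puts the token back (x->done++) so the count never reaches zero with waiters left behind. A minimal userspace sketch of the same put-the-token-back pattern, using a POSIX semaphore in place of the kernel completion (illustrative only, not the kernel code):

    /* Sketch: reset_done stands in for dev_priv->error_completion,
     * wedged for dev_priv->mm.wedged. */
    #include <semaphore.h>
    #include <stdbool.h>
    #include <errno.h>

    static sem_t reset_done;      /* posted once per completed GPU reset */
    static volatile bool wedged;  /* set while the GPU is hung */

    static int check_is_wedged(void)
    {
            if (!wedged)
                    return 0;

            sem_wait(&reset_done);          /* consume one reset token */

            if (!wedged)                    /* the reset worked */
                    return 0;

            sem_post(&reset_done);          /* still hung: return the token */
            return -EIO;
    }
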
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 		obj_priv->pin_count == 0;
 }
 
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+		     unsigned long start,
 		     unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 	drm_mm_init(&dev_priv->mm.gtt_space, start,
 		    end - start);
 
-	dev->gtt_total = (uint32_t) (end - start);
+	dev_priv->mm.gtt_total = end - start;
 
 	return 0;
 }
@@ -103,14 +209,16 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	args->aper_size = dev->gtt_total;
-	args->aper_available_size = (args->aper_size -
-				     atomic_read(&dev->pin_memory));
+	mutex_lock(&dev->struct_mutex);
+	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
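
The get_aperture ioctl above switches from atomic_read() on dev->pin_memory to plain counters (the dev_priv->mm.* bookkeeping added earlier) that are only modified under struct_mutex, so a reader takes the same mutex for a consistent snapshot. A userspace sketch of that pattern with a pthread mutex (illustrative only, hypothetical names):

    #include <pthread.h>
    #include <stddef.h>

    struct mm_info {
            pthread_mutex_t lock;   /* counterpart of dev->struct_mutex */
            size_t gtt_total;       /* both fields only written under lock */
            size_t pin_memory;
    };

    static size_t available_aperture(struct mm_info *mm)
    {
            size_t avail;

            /* Reading both counters under the writers' lock guarantees
             * the pair is mutually consistent, unlike two atomic reads. */
            pthread_mutex_lock(&mm->lock);
            avail = mm->gtt_total - mm->pin_memory;
            pthread_mutex_unlock(&mm->lock);
            return avail;
    }
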
@@ -265,7 +373,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
@@ -384,7 +494,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto fail_put_user_pages;
 
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
@@ -464,21 +576,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pread *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret;
+	int ret = 0;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check source.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check source. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0)
+		goto out;
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -490,8 +608,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 					file_priv);
 	}
 
+out:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
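
The rewritten bounds check above (also applied to pwrite further down, per the "Rephrase pwrite bounds checking to avoid any potential overflow" commit in the merge) is the overflow-safe form: with unsigned 64-bit ioctl arguments, args->offset + args->size can wrap past zero and slip under obj->size, whereas args->size > obj->size - args->offset cannot underflow once args->offset > obj->size has been excluded by short-circuit evaluation. A standalone demonstration (not kernel code; the variables are hypothetical stand-ins for the ioctl args):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t obj_size = 4096;
            uint64_t offset = 16, size = UINT64_MAX - 8; /* hostile args */

            /* Naive form: offset + size wraps to 7 and passes the check. */
            printf("naive check accepts: %d\n", offset + size <= obj_size);

            /* Overflow-safe form used in the ioctl above. */
            printf("safe check accepts:  %d\n",
                   offset <= obj_size && size <= obj_size - offset);
            return 0;
    }
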
@@ -580,11 +698,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
-	if (!access_ok(VERIFY_READ, user_data, remain))
-		return -EFAULT;
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
-	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_object_pin(obj, 0);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
@@ -679,7 +797,10 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		goto out_unpin_pages;
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out_unpin_pages;
+
 	ret = i915_gem_object_pin(obj, 0);
 	if (ret)
 		goto out_unlock;
@@ -753,7 +874,9 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
 
 	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
@@ -849,7 +972,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto fail_put_user_pages;
 
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
@@ -934,14 +1059,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	/* Bounds check destination.
-	 *
-	 * XXX: This could use review for overflow issues...
-	 */
-	if (args->offset > obj->size || args->size > obj->size ||
-	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	/* Bounds check destination. */
+	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0)
+		goto out;
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,7 +1084,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 dev->gtt_total != 0 &&
+		 obj_priv->gtt_space &&
 		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
 		if (ret == -EFAULT) {
@@ -975,8 +1106,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+out:
 	drm_gem_object_unreference_unlocked(obj);
-
 	return ret;
 }
 
@@ -1017,14 +1148,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
-		 obj, obj->size, read_domains, write_domain);
-#endif
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -1048,7 +1179,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
-
 	/* Maintain LRU order of "inactive" objects */
 	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@ -1067,27 +1197,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_sw_finish *args = data;
 	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL) {
-		mutex_unlock(&dev->struct_mutex);
+	if (obj == NULL)
 		return -ENOENT;
-	}
 
-#if WATCH_BUF
-	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
-		 __func__, args->handle, obj, obj->size);
-#endif
-	obj_priv = to_intel_bo(obj);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (obj_priv->pin_count)
+	if (to_intel_bo(obj)->pin_count)
 		i915_gem_object_flush_cpu_write_domain(obj);
 
 	drm_gem_object_unreference(obj);
@@ -1179,7 +1305,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Need a new fence register? */
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
+		ret = i915_gem_object_get_fence_reg(obj, true);
 		if (ret)
 			goto unlock;
 	}
@@ -1244,7 +1370,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 						    obj->size / PAGE_SIZE, 0, 0);
 	if (!list->file_offset_node) {
 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOMEM;
+		ret = -ENOSPC;
 		goto out_free_list;
 	}
 
@@ -1256,9 +1382,9 @@
 	}
 
 	list->hash.key = list->file_offset_node->start;
-	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
 		DRM_ERROR("failed to add to map hash\n");
-		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1343,14 +1469,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	if (IS_I9XX(dev))
+	if (INTEL_INFO(dev)->gen == 3)
 		start = 1024*1024;
 	else
 		start = 512*1024;
@@ -1392,7 +1518,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
 
 	obj_priv = to_intel_bo(obj);
 
@@ -1434,7 +1564,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-void
+static void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1468,13 +1598,24 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 	obj_priv->pages = NULL;
 }
 
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	ring->outstanding_lazy_request = true;
+	return dev_priv->next_seqno;
+}
+
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
 	BUG_ON(ring == NULL);
 	obj_priv->ring = ring;
 
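
i915_gem_next_request_seqno() above introduces the lazy-request scheme: moving an object to the active list only records the seqno that the next request will carry and sets outstanding_lazy_request; the request itself is emitted later, in i915_add_request() or on demand before a wait (see i915_do_wait_request() further down). A minimal sketch of the state machine, using a simplified hypothetical ring struct (not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
            bool outstanding_lazy_request;
            uint32_t next_seqno;
    };

    /* Tag work with the seqno it *will* get, without emitting a request. */
    static uint32_t next_request_seqno(struct ring *ring)
    {
            ring->outstanding_lazy_request = true;
            return ring->next_seqno;
    }

    /* Emitting the request consumes the lazy flag and advances the counter,
     * so repeated tagging between requests shares one seqno. */
    static uint32_t add_request(struct ring *ring)
    {
            ring->outstanding_lazy_request = false;
            return ring->next_seqno++;
    }
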
@@ -1483,10 +1624,9 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
 		drm_gem_object_reference(obj);
 		obj_priv->active = 1;
 	}
+
 	/* Move from whatever list we were on to the tail of execution. */
-	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list, &ring->active_list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1536,9 +1676,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-	i915_verify_inactive(dev, __FILE__, __LINE__);
 	if (obj_priv->pin_count != 0)
-		list_del_init(&obj_priv->list);
+		list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
@@ -1550,12 +1689,12 @@
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
 	}
-	i915_verify_inactive(dev, __FILE__, __LINE__);
+	WARN_ON(i915_verify_lists(dev));
 }
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno,
+			       uint32_t flush_domains,
 			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1566,14 +1705,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			    gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain &&
-		    obj_priv->ring->ring_flag == ring->ring_flag) {
+		if (obj->write_domain & flush_domains &&
+		    obj_priv->ring == ring) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno, ring);
+			i915_gem_object_move_to_active(obj, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1591,23 +1729,27 @@
 }
 
 uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+		 struct drm_file *file,
+		 struct drm_i915_gem_request *request,
+		 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *i915_file_priv = NULL;
-	struct drm_i915_gem_request *request;
+	struct drm_i915_file_private *file_priv = NULL;
 	uint32_t seqno;
 	int was_empty;
 
-	if (file_priv != NULL)
-		i915_file_priv = file_priv->driver_priv;
+	if (file != NULL)
+		file_priv = file->driver_priv;
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return 0;
+	if (request == NULL) {
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return 0;
+	}
 
-	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+	seqno = ring->add_request(dev, ring, 0);
+	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1615,23 +1757,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (i915_file_priv) {
+	if (file_priv) {
+		spin_lock(&file_priv->mm.lock);
+		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
-			      &i915_file_priv->mm.request_list);
-	} else {
-		INIT_LIST_HEAD(&request->client_list);
+			      &file_priv->mm.request_list);
+		spin_unlock(&file_priv->mm.lock);
 	}
 
-	/* Associate any objects on the flushing list matching the write
-	 * domain we're flushing with our flush.
-	 */
-	if (flush_domains != 0)
-		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
 	if (!dev_priv->mm.suspended) {
-		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 		if (was_empty)
-			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+			queue_delayed_work(dev_priv->wq,
+					   &dev_priv->mm.retire_work, HZ);
 	}
 	return seqno;
 }
@@ -1642,91 +1781,105 @@
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-static uint32_t
+static void
 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
 	ring->flush(dev, ring,
 		    I915_GEM_DOMAIN_COMMAND, flush_domains);
-	return flush_domains;
 }
 
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
-			struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = request->file_priv;
 
-	trace_i915_gem_request_retire(dev, request->seqno);
+	if (!file_priv)
+		return;
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate.
-	 */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&request->ring->active_list)) {
-		struct drm_gem_object *obj;
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
+{
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&request->ring->active_list,
+		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
-		obj = &obj_priv->base;
-
-		/* If the seqno being retired doesn't match the oldest in the
-		 * list, then the oldest in the list must still be newer than
-		 * this seqno.
-		 */
-		if (obj_priv->last_rendering_seqno != request->seqno)
-			goto out;
 
-#if WATCH_LRU
-		DRM_INFO("%s: retire %d moves to inactive list %p\n",
-			 __func__, request->seqno, obj);
-#endif
-
-		if (obj->write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else {
-			/* Take a reference on the object so it won't be
-			 * freed while the spinlock is held.  The list
-			 * protection for this spinlock is safe when breaking
-			 * the lock like this since the next thing we do
-			 * is just get the head of the list again.
-			 */
-			drm_gem_object_reference(obj);
-			i915_gem_object_move_to_inactive(obj);
-			spin_unlock(&dev_priv->mm.active_list_lock);
-			drm_gem_object_unreference(obj);
-			spin_lock(&dev_priv->mm.active_list_lock);
-		}
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
 	}
-out:
-	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
 {
-	return (int32_t)(seq1 - seq2) >= 0;
-}
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int i;
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-		   struct intel_ring_buffer *ring)
-{
-	return ring->get_gem_seqno(dev, ring);
+	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+
+	/* Remove anything from the flushing lists. The GPU cache is likely
+	 * to be lost on reset along with the data, so simply move the
+	 * lost bo to the inactive list.
+	 */
+	while (!list_empty(&dev_priv->mm.flushing_list)) {
+		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+					    struct drm_i915_gem_object,
+					    list);
+
+		obj_priv->base.write_domain = 0;
+		list_del_init(&obj_priv->gpu_write_list);
+		i915_gem_object_move_to_inactive(&obj_priv->base);
+	}
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
+	list_for_each_entry(obj_priv,
+			    &dev_priv->mm.inactive_list,
+			    list)
+	{
+		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	/* The fence registers are invalidated so clear them out */
+	for (i = 0; i < 16; i++) {
+		struct drm_i915_fence_reg *reg;
+
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			continue;
+
+		i915_gem_clear_fence_reg(reg->obj);
+	}
 }
 
 /**
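
The i915_seqno_passed() helper removed from this file in the hunk above compares sequence numbers through a signed 32-bit difference, which keeps the ordering correct even after the seqno counter wraps around; the diff only relocates it as part of the reshuffle, the trick itself is unchanged. A standalone demonstration (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* True if seq1 is at or after seq2, modulo 2^32. The unsigned
     * subtraction wraps, and reinterpreting it as signed recovers the
     * relative order for any two seqnos less than 2^31 apart. */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            /* 0x00000002 is "after" 0xfffffffe despite being numerically
             * smaller, because the counter wrapped in between. */
            printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu)); /* 1 */
            printf("%d\n", seqno_passed(0xfffffffeu, 0x00000002u)); /* 0 */
            return 0;
    }
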
@@ -1739,38 +1892,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!ring->status_page.page_addr
-	    || list_empty(&ring->request_list))
+	if (!ring->status_page.page_addr ||
+	    list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev, ring);
+	WARN_ON(i915_verify_lists(dev));
 
+	seqno = ring->get_seqno(dev, ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
-		uint32_t retiring_seqno;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
-		retiring_seqno = request->seqno;
 
-		if (i915_seqno_passed(seqno, retiring_seqno) ||
-		    atomic_read(&dev_priv->mm.wedged)) {
-			i915_gem_retire_request(dev, request);
+		if (!i915_seqno_passed(seqno, request->seqno))
+			break;
+
+		trace_i915_gem_request_retire(dev, request->seqno);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&ring->active_list,
+					    struct drm_i915_gem_object,
+					    list);
 
-			list_del(&request->list);
-			list_del(&request->client_list);
-			kfree(request);
-		} else
+		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
+
+		obj = &obj_priv->base;
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
+			i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
 		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 void
@@ -1797,7 +1970,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
 }
 
-void
+static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
@@ -1807,7 +1980,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
 			     mm.retire_work.work);
 	dev = dev_priv->dev;
 
-	mutex_lock(&dev->struct_mutex);
+	/* Come back later if the device is busy... */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		return;
+	}
+
 	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
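
The retire work handler above now uses mutex_trylock() and reschedules itself when the lock is contended, instead of blocking a workqueue thread on struct_mutex (one of the lock-loop fixes named in the commit list). A userspace sketch of the trylock-and-requeue pattern (illustrative only; requeue() is a hypothetical stand-in for queue_delayed_work()):

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Assumed helper: schedule fn to run again later. */
    extern void requeue(void (*fn)(void));

    static void retire_worker(void)
    {
            /* Returns nonzero (EBUSY) without blocking if contended. */
            if (pthread_mutex_trylock(&big_lock) != 0) {
                    requeue(retire_worker); /* come back later, lock is busy */
                    return;
            }
            /* ... retire completed work under the lock ... */
            pthread_mutex_unlock(&big_lock);
    }
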
@@ -1820,7 +1998,7 @@
 
 int
 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-		     int interruptible, struct intel_ring_buffer *ring)
+		     bool interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1829,9 +2007,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
-		return -EIO;
+		return -EAGAIN;
+
+	if (ring->outstanding_lazy_request) {
+		seqno = i915_add_request(dev, NULL, NULL, ring);
+		if (seqno == 0)
+			return -ENOMEM;
+	}
+	BUG_ON(seqno == dev_priv->next_seqno);
 
-	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1850,12 +2035,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 		else
 			wait_event(ring->irq_queue,
 				i915_seqno_passed(
-					ring->get_gem_seqno(dev, ring), seqno)
+					ring->get_seqno(dev, ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 
 		ring->user_irq_put(dev, ring);
@@ -1864,11 +2049,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
-		ret = -EIO;
+		ret = -EAGAIN;
 
 	if (ret && ret != -ERESTARTSYS)
-		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring. While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1887,27 +2073,44 @@
  */
 static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno,
 		  struct intel_ring_buffer *ring)
 {
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
 static void
+i915_gem_flush_ring(struct drm_device *dev,
+		    struct drm_file *file_priv,
+		    struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	ring->flush(dev, ring, invalidate_domains, flush_domains);
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
 i915_gem_flush(struct drm_device *dev,
+	       struct drm_file *file_priv,
 	       uint32_t invalidate_domains,
-	       uint32_t flush_domains)
+	       uint32_t flush_domains,
+	       uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
-	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-			invalidate_domains,
-			flush_domains);
 
-	if (HAS_BSD(dev))
-		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-				invalidate_domains,
-				flush_domains);
+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+		if (flush_rings & RING_RENDER)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->render_ring,
+					    invalidate_domains, flush_domains);
+		if (flush_rings & RING_BSD)
+			i915_gem_flush_ring(dev, file_priv,
+					    &dev_priv->bsd_ring,
+					    invalidate_domains, flush_domains);
+	}
 }
 
 /**
@@ -1915,7 +2118,8 @@ i915_gem_flush(struct drm_device *dev,
  * safe to unbind from the GTT or access from the CPU.
  */
 static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+			       bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1930,13 +2134,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * it.
 	 */
 	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			 __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_wait_request(dev,
-			obj_priv->last_rendering_seqno, obj_priv->ring);
-		if (ret != 0)
+		ret = i915_do_wait_request(dev,
+					   obj_priv->last_rendering_seqno,
+					   interruptible,
+					   obj_priv->ring);
+		if (ret)
 			return ret;
 	}
 
@@ -1950,14 +2152,10 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
-#if WATCH_BUF
-	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
 	if (obj_priv->gtt_space == NULL)
 		return 0;
 
@@ -1982,33 +2180,26 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	 * should be safe and we need to cleanup or else we might
 	 * cause memory corruption through use-after-free.
 	 */
+	if (ret) {
+		i915_gem_clflush_object(obj);
+		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+	}
 
 	/* release the fence reg _after_ flushing */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
 
-	if (obj_priv->agp_mem != NULL) {
-		drm_unbind_agp(obj_priv->agp_mem);
-		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-		obj_priv->agp_mem = NULL;
-	}
+	drm_unbind_agp(obj_priv->agp_mem);
+	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
 	i915_gem_object_put_pages(obj);
 	BUG_ON(obj_priv->pages_refcount);
 
-	if (obj_priv->gtt_space) {
-		atomic_dec(&dev->gtt_count);
-		atomic_sub(obj->size, &dev->gtt_memory);
+	i915_gem_info_remove_gtt(dev_priv, obj->size);
+	list_del_init(&obj_priv->list);
 
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
-	}
-
-	/* Remove ourselves from the LRU list if present. */
-	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!list_empty(&obj_priv->list))
-		list_del_init(&obj_priv->list);
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	drm_mm_put_block(obj_priv->gtt_space);
+	obj_priv->gtt_space = NULL;
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2018,48 +2209,45 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2018 | return ret; | 2209 | return ret; |
2019 | } | 2210 | } |
2020 | 2211 | ||
2212 | static int i915_ring_idle(struct drm_device *dev, | ||
2213 | struct intel_ring_buffer *ring) | ||
2214 | { | ||
2215 | i915_gem_flush_ring(dev, NULL, ring, | ||
2216 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2217 | return i915_wait_request(dev, | ||
2218 | i915_gem_next_request_seqno(dev, ring), | ||
2219 | ring); | ||
2220 | } | ||
2221 | |||
2021 | int | 2222 | int |
2022 | i915_gpu_idle(struct drm_device *dev) | 2223 | i915_gpu_idle(struct drm_device *dev) |
2023 | { | 2224 | { |
2024 | drm_i915_private_t *dev_priv = dev->dev_private; | 2225 | drm_i915_private_t *dev_priv = dev->dev_private; |
2025 | bool lists_empty; | 2226 | bool lists_empty; |
2026 | uint32_t seqno1, seqno2; | ||
2027 | int ret; | 2227 | int ret; |
2028 | 2228 | ||
2029 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2030 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2229 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2031 | list_empty(&dev_priv->render_ring.active_list) && | 2230 | list_empty(&dev_priv->render_ring.active_list) && |
2032 | (!HAS_BSD(dev) || | 2231 | (!HAS_BSD(dev) || |
2033 | list_empty(&dev_priv->bsd_ring.active_list))); | 2232 | list_empty(&dev_priv->bsd_ring.active_list))); |
2034 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2035 | |||
2036 | if (lists_empty) | 2233 | if (lists_empty) |
2037 | return 0; | 2234 | return 0; |
2038 | 2235 | ||
2039 | /* Flush everything onto the inactive list. */ | 2236 | /* Flush everything onto the inactive list. */ |
2040 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2237 | ret = i915_ring_idle(dev, &dev_priv->render_ring); |
2041 | seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | 2238 | if (ret) |
2042 | &dev_priv->render_ring); | 2239 | return ret; |
2043 | if (seqno1 == 0) | ||
2044 | return -ENOMEM; | ||
2045 | ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); | ||
2046 | 2240 | ||
2047 | if (HAS_BSD(dev)) { | 2241 | if (HAS_BSD(dev)) { |
2048 | seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | 2242 | ret = i915_ring_idle(dev, &dev_priv->bsd_ring); |
2049 | &dev_priv->bsd_ring); | ||
2050 | if (seqno2 == 0) | ||
2051 | return -ENOMEM; | ||
2052 | |||
2053 | ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); | ||
2054 | if (ret) | 2243 | if (ret) |
2055 | return ret; | 2244 | return ret; |
2056 | } | 2245 | } |
2057 | 2246 | ||
2058 | 2247 | return 0; | |
2059 | return ret; | ||
2060 | } | 2248 | } |
2061 | 2249 | ||
2062 | int | 2250 | static int |
2063 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2251 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2064 | gfp_t gfpmask) | 2252 | gfp_t gfpmask) |
2065 | { | 2253 | { |
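The i915_gpu_idle() rewrite in the hunk above replaces the open-coded flush/add-request/wait sequence, done once per ring, with the new i915_ring_idle() helper, and drops the now-unneeded active_list_lock. A minimal sketch of how further rings would slot into this scheme, assuming the helper signature shown above (the array-of-rings loop is hypothetical; at this point the function still names the render and bsd rings explicitly):

        /* Hypothetical sketch: drain an arbitrary set of rings with the
         * helper introduced above.  Error handling mirrors the rewritten
         * i915_gpu_idle(): stop at the first ring that fails to drain.
         */
        static int idle_rings(struct drm_device *dev,
                              struct intel_ring_buffer *rings[], int count)
        {
                int i, ret;

                for (i = 0; i < count; i++) {
                        ret = i915_ring_idle(dev, rings[i]);
                        if (ret)
                                return ret;
                }
                return 0;
        }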
@@ -2239,7 +2427,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2239 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 2427 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); |
2240 | } | 2428 | } |
2241 | 2429 | ||
2242 | static int i915_find_fence_reg(struct drm_device *dev) | 2430 | static int i915_find_fence_reg(struct drm_device *dev, |
2431 | bool interruptible) | ||
2243 | { | 2432 | { |
2244 | struct drm_i915_fence_reg *reg = NULL; | 2433 | struct drm_i915_fence_reg *reg = NULL; |
2245 | struct drm_i915_gem_object *obj_priv = NULL; | 2434 | struct drm_i915_gem_object *obj_priv = NULL; |
@@ -2284,7 +2473,7 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
2284 | * private reference to obj like the other callers of put_fence_reg | 2473 | * private reference to obj like the other callers of put_fence_reg |
2285 | * (set_tiling ioctl) do. */ | 2474 | * (set_tiling ioctl) do. */ |
2286 | drm_gem_object_reference(obj); | 2475 | drm_gem_object_reference(obj); |
2287 | ret = i915_gem_object_put_fence_reg(obj); | 2476 | ret = i915_gem_object_put_fence_reg(obj, interruptible); |
2288 | drm_gem_object_unreference(obj); | 2477 | drm_gem_object_unreference(obj); |
2289 | if (ret != 0) | 2478 | if (ret != 0) |
2290 | return ret; | 2479 | return ret; |
@@ -2306,7 +2495,8 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
2306 | * and tiling format. | 2495 | * and tiling format. |
2307 | */ | 2496 | */ |
2308 | int | 2497 | int |
2309 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | 2498 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, |
2499 | bool interruptible) | ||
2310 | { | 2500 | { |
2311 | struct drm_device *dev = obj->dev; | 2501 | struct drm_device *dev = obj->dev; |
2312 | struct drm_i915_private *dev_priv = dev->dev_private; | 2502 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2341,7 +2531,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2341 | break; | 2531 | break; |
2342 | } | 2532 | } |
2343 | 2533 | ||
2344 | ret = i915_find_fence_reg(dev); | 2534 | ret = i915_find_fence_reg(dev, interruptible); |
2345 | if (ret < 0) | 2535 | if (ret < 0) |
2346 | return ret; | 2536 | return ret; |
2347 | 2537 | ||
@@ -2400,7 +2590,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2400 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2590 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); |
2401 | break; | 2591 | break; |
2402 | case 3: | 2592 | case 3: |
2403 | if (obj_priv->fence_reg > 8) | 2593 | if (obj_priv->fence_reg >= 8) |
2404 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2594 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; |
2405 | else | 2595 | else |
2406 | case 2: | 2596 | case 2: |
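The '>' to '>=' change above fixes an off-by-one in the gen3 fence-register addressing: registers 0-7 live in the FENCE_REG_830_0 bank and registers 8-15 in the FENCE_REG_945_8 bank, so register 8 must be addressed through the second bank. A restatement of the corrected mapping (not new driver API):

        /* fence_reg 7 -> FENCE_REG_830_0 + 7 * 4
         * fence_reg 8 -> FENCE_REG_945_8 + 0 * 4
         *   (the old '>' test mis-addressed register 8 into the 830 bank)
         */
        static u32 gen3_fence_reg_offset(int fence_reg)
        {
                if (fence_reg >= 8)
                        return FENCE_REG_945_8 + (fence_reg - 8) * 4;
                return FENCE_REG_830_0 + fence_reg * 4;
        }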
@@ -2419,15 +2609,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2419 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access | 2609 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access |
2420 | * to the buffer to finish, and then resets the fence register. | 2610 | * to the buffer to finish, and then resets the fence register. |
2421 | * @obj: tiled object holding a fence register. | 2611 | * @obj: tiled object holding a fence register. |
2612 | * @interruptible: whether the wait upon the fence is interruptible | ||
2422 | * | 2613 | * |
2423 | * Zeroes out the fence register itself and clears out the associated | 2614 | * Zeroes out the fence register itself and clears out the associated |
2424 | * data structures in dev_priv and obj_priv. | 2615 | * data structures in dev_priv and obj_priv. |
2425 | */ | 2616 | */ |
2426 | int | 2617 | int |
2427 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 2618 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj, |
2619 | bool interruptible) | ||
2428 | { | 2620 | { |
2429 | struct drm_device *dev = obj->dev; | 2621 | struct drm_device *dev = obj->dev; |
2622 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2430 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2623 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2624 | struct drm_i915_fence_reg *reg; | ||
2431 | 2625 | ||
2432 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2626 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
2433 | return 0; | 2627 | return 0; |
@@ -2442,20 +2636,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | |||
2442 | * therefore we must wait for any outstanding access to complete | 2636 | * therefore we must wait for any outstanding access to complete |
2443 | * before clearing the fence. | 2637 | * before clearing the fence. |
2444 | */ | 2638 | */ |
2445 | if (!IS_I965G(dev)) { | 2639 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; |
2640 | if (reg->gpu) { | ||
2446 | int ret; | 2641 | int ret; |
2447 | 2642 | ||
2448 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2643 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); |
2449 | if (ret != 0) | 2644 | if (ret) |
2450 | return ret; | 2645 | return ret; |
2451 | 2646 | ||
2452 | ret = i915_gem_object_wait_rendering(obj); | 2647 | ret = i915_gem_object_wait_rendering(obj, interruptible); |
2453 | if (ret != 0) | 2648 | if (ret) |
2454 | return ret; | 2649 | return ret; |
2650 | |||
2651 | reg->gpu = false; | ||
2455 | } | 2652 | } |
2456 | 2653 | ||
2457 | i915_gem_object_flush_gtt_write_domain(obj); | 2654 | i915_gem_object_flush_gtt_write_domain(obj); |
2458 | i915_gem_clear_fence_reg (obj); | 2655 | i915_gem_clear_fence_reg(obj); |
2459 | 2656 | ||
2460 | return 0; | 2657 | return 0; |
2461 | } | 2658 | } |
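put_fence_reg() now keys its stall on a per-register reg->gpu flag instead of the blanket !IS_I965G(dev) test: the flush-and-wait is only needed when the GPU has actually issued fenced commands against the register. The flag is set in the pin-and-relocate path later in this diff; a sketch of the intended pairing, using the names as they appear here (the wrapper function itself is hypothetical):

        /* Sketch: the fence is flagged GPU-busy when handed to a batch,
         * and i915_gem_object_put_fence_reg() only waits when that flag
         * was set.
         */
        static int pin_fenced_object(struct drm_gem_object *obj)
        {
                struct drm_i915_private *dev_priv = obj->dev->dev_private;
                struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                int ret;

                ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        return ret;

                dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
                return 0;
        }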
@@ -2488,7 +2685,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2488 | /* If the object is bigger than the entire aperture, reject it early | 2685 | /* If the object is bigger than the entire aperture, reject it early |
2489 | * before evicting everything in a vain attempt to find space. | 2686 | * before evicting everything in a vain attempt to find space. |
2490 | */ | 2687 | */ |
2491 | if (obj->size > dev->gtt_total) { | 2688 | if (obj->size > dev_priv->mm.gtt_total) { |
2492 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2689 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2493 | return -E2BIG; | 2690 | return -E2BIG; |
2494 | } | 2691 | } |
@@ -2506,9 +2703,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2506 | /* If the gtt is empty and we're still having trouble | 2703 | /* If the gtt is empty and we're still having trouble |
2507 | * fitting our object in, we're out of memory. | 2704 | * fitting our object in, we're out of memory. |
2508 | */ | 2705 | */ |
2509 | #if WATCH_LRU | ||
2510 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | ||
2511 | #endif | ||
2512 | ret = i915_gem_evict_something(dev, obj->size, alignment); | 2706 | ret = i915_gem_evict_something(dev, obj->size, alignment); |
2513 | if (ret) | 2707 | if (ret) |
2514 | return ret; | 2708 | return ret; |
@@ -2516,10 +2710,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2516 | goto search_free; | 2710 | goto search_free; |
2517 | } | 2711 | } |
2518 | 2712 | ||
2519 | #if WATCH_BUF | ||
2520 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | ||
2521 | obj->size, obj_priv->gtt_offset); | ||
2522 | #endif | ||
2523 | ret = i915_gem_object_get_pages(obj, gfpmask); | 2713 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2524 | if (ret) { | 2714 | if (ret) { |
2525 | drm_mm_put_block(obj_priv->gtt_space); | 2715 | drm_mm_put_block(obj_priv->gtt_space); |
@@ -2564,11 +2754,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2564 | 2754 | ||
2565 | goto search_free; | 2755 | goto search_free; |
2566 | } | 2756 | } |
2567 | atomic_inc(&dev->gtt_count); | ||
2568 | atomic_add(obj->size, &dev->gtt_memory); | ||
2569 | 2757 | ||
2570 | /* keep track of the bound object by adding it to the inactive list */ | 2758 | /* keep track of the bound object by adding it to the inactive list */ |
2571 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 2759 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
2760 | i915_gem_info_add_gtt(dev_priv, obj->size); | ||
2572 | 2761 | ||
2573 | /* Assert that the object is not currently in any GPU domain. As it | 2762 | /* Assert that the object is not currently in any GPU domain. As it |
2574 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2763 | * wasn't in the GTT, there shouldn't be any way it could have been in |
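Binding now records usage through i915_gem_info_add_gtt() instead of atomics on struct drm_device, and unbind calls the matching remove. The helper bodies are not part of this file's diff; under the assumption that they live in i915_drv.h, something of this shape would match the call sites (plain counters suffice because every caller holds struct_mutex):

        /* Assumed shape of the accounting helpers (not shown in this file): */
        static inline void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
                                                 size_t size)
        {
                dev_priv->mm.gtt_count++;
                dev_priv->mm.gtt_memory += size;
        }

        static inline void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
                                                    size_t size)
        {
                dev_priv->mm.gtt_count--;
                dev_priv->mm.gtt_memory -= size;
        }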
@@ -2601,25 +2790,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
2601 | 2790 | ||
2602 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2791 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2603 | static int | 2792 | static int |
2604 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | 2793 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, |
2794 | bool pipelined) | ||
2605 | { | 2795 | { |
2606 | struct drm_device *dev = obj->dev; | 2796 | struct drm_device *dev = obj->dev; |
2607 | uint32_t old_write_domain; | 2797 | uint32_t old_write_domain; |
2608 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2609 | 2798 | ||
2610 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2799 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2611 | return 0; | 2800 | return 0; |
2612 | 2801 | ||
2613 | /* Queue the GPU write cache flushing we need. */ | 2802 | /* Queue the GPU write cache flushing we need. */ |
2614 | old_write_domain = obj->write_domain; | 2803 | old_write_domain = obj->write_domain; |
2615 | i915_gem_flush(dev, 0, obj->write_domain); | 2804 | i915_gem_flush_ring(dev, NULL, |
2616 | if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) | 2805 | to_intel_bo(obj)->ring, |
2617 | return -ENOMEM; | 2806 | 0, obj->write_domain); |
2807 | BUG_ON(obj->write_domain); | ||
2618 | 2808 | ||
2619 | trace_i915_gem_object_change_domain(obj, | 2809 | trace_i915_gem_object_change_domain(obj, |
2620 | obj->read_domains, | 2810 | obj->read_domains, |
2621 | old_write_domain); | 2811 | old_write_domain); |
2622 | return 0; | 2812 | |
2813 | if (pipelined) | ||
2814 | return 0; | ||
2815 | |||
2816 | return i915_gem_object_wait_rendering(obj, true); | ||
2623 | } | 2817 | } |
2624 | 2818 | ||
2625 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2819 | /** Flushes the GTT write domain for the object if it's dirty. */ |
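flush_gpu_write_domain() now takes a pipelined flag: the write-domain flush is always queued to the object's ring, but the synchronous wait is skipped when the caller can keep the access pipelined on the GPU. The two call patterns, restated from the call sites elsewhere in this diff (the example wrappers are hypothetical):

        static int example_sync_flush(struct drm_gem_object *obj)
        {
                /* Caller must see the results (CPU access): flush and wait. */
                return i915_gem_object_flush_gpu_write_domain(obj, false);
        }

        static int example_pipelined_flush(struct drm_gem_object *obj)
        {
                /* More GPU work follows (e.g. a display flip): queue the
                 * flush but skip the wait, leaving the dependency on the ring.
                 */
                return i915_gem_object_flush_gpu_write_domain(obj, true);
        }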
@@ -2663,26 +2857,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2663 | old_write_domain); | 2857 | old_write_domain); |
2664 | } | 2858 | } |
2665 | 2859 | ||
2666 | int | ||
2667 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | ||
2668 | { | ||
2669 | int ret = 0; | ||
2670 | |||
2671 | switch (obj->write_domain) { | ||
2672 | case I915_GEM_DOMAIN_GTT: | ||
2673 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2674 | break; | ||
2675 | case I915_GEM_DOMAIN_CPU: | ||
2676 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2677 | break; | ||
2678 | default: | ||
2679 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
2680 | break; | ||
2681 | } | ||
2682 | |||
2683 | return ret; | ||
2684 | } | ||
2685 | |||
2686 | /** | 2860 | /** |
2687 | * Moves a single object to the GTT read, and possibly write domain. | 2861 | * Moves a single object to the GTT read, and possibly write domain. |
2688 | * | 2862 | * |
@@ -2700,32 +2874,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2700 | if (obj_priv->gtt_space == NULL) | 2874 | if (obj_priv->gtt_space == NULL) |
2701 | return -EINVAL; | 2875 | return -EINVAL; |
2702 | 2876 | ||
2703 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2877 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
2704 | if (ret != 0) | 2878 | if (ret != 0) |
2705 | return ret; | 2879 | return ret; |
2706 | 2880 | ||
2707 | /* Wait on any GPU rendering and flushing to occur. */ | 2881 | i915_gem_object_flush_cpu_write_domain(obj); |
2708 | ret = i915_gem_object_wait_rendering(obj); | 2882 | |
2709 | if (ret != 0) | 2883 | if (write) { |
2710 | return ret; | 2884 | ret = i915_gem_object_wait_rendering(obj, true); |
2885 | if (ret) | ||
2886 | return ret; | ||
2887 | } | ||
2711 | 2888 | ||
2712 | old_write_domain = obj->write_domain; | 2889 | old_write_domain = obj->write_domain; |
2713 | old_read_domains = obj->read_domains; | 2890 | old_read_domains = obj->read_domains; |
2714 | 2891 | ||
2715 | /* If we're writing through the GTT domain, then CPU and GPU caches | ||
2716 | * will need to be invalidated at next use. | ||
2717 | */ | ||
2718 | if (write) | ||
2719 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
2720 | |||
2721 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2722 | |||
2723 | /* It should now be out of any other write domains, and we can update | 2892 | /* It should now be out of any other write domains, and we can update |
2724 | * the domain values for our changes. | 2893 | * the domain values for our changes. |
2725 | */ | 2894 | */ |
2726 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2895 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2727 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2896 | obj->read_domains |= I915_GEM_DOMAIN_GTT; |
2728 | if (write) { | 2897 | if (write) { |
2898 | obj->read_domains = I915_GEM_DOMAIN_GTT; | ||
2729 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 2899 | obj->write_domain = I915_GEM_DOMAIN_GTT; |
2730 | obj_priv->dirty = 1; | 2900 | obj_priv->dirty = 1; |
2731 | } | 2901 | } |
@@ -2742,51 +2912,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2742 | * wait, as in the modesetting process we're not supposed to be interrupted. | 2912 | * wait, as in the modesetting process we're not supposed to be interrupted. |
2743 | */ | 2913 | */ |
2744 | int | 2914 | int |
2745 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 2915 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, |
2916 | bool pipelined) | ||
2746 | { | 2917 | { |
2747 | struct drm_device *dev = obj->dev; | ||
2748 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2918 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2749 | uint32_t old_write_domain, old_read_domains; | 2919 | uint32_t old_read_domains; |
2750 | int ret; | 2920 | int ret; |
2751 | 2921 | ||
2752 | /* Not valid to be called on unbound objects. */ | 2922 | /* Not valid to be called on unbound objects. */ |
2753 | if (obj_priv->gtt_space == NULL) | 2923 | if (obj_priv->gtt_space == NULL) |
2754 | return -EINVAL; | 2924 | return -EINVAL; |
2755 | 2925 | ||
2756 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2926 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); |
2757 | if (ret) | 2927 | if (ret) |
2758 | return ret; | 2928 | return ret; |
2759 | 2929 | ||
2760 | /* Wait on any GPU rendering and flushing to occur. */ | 2930 | /* Currently, we are always called from a non-interruptible context. */ |
2761 | if (obj_priv->active) { | 2931 | if (!pipelined) { |
2762 | #if WATCH_BUF | 2932 | ret = i915_gem_object_wait_rendering(obj, false); |
2763 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 2933 | if (ret) |
2764 | __func__, obj, obj_priv->last_rendering_seqno); | ||
2765 | #endif | ||
2766 | ret = i915_do_wait_request(dev, | ||
2767 | obj_priv->last_rendering_seqno, | ||
2768 | 0, | ||
2769 | obj_priv->ring); | ||
2770 | if (ret != 0) | ||
2771 | return ret; | 2934 | return ret; |
2772 | } | 2935 | } |
2773 | 2936 | ||
2774 | i915_gem_object_flush_cpu_write_domain(obj); | 2937 | i915_gem_object_flush_cpu_write_domain(obj); |
2775 | 2938 | ||
2776 | old_write_domain = obj->write_domain; | ||
2777 | old_read_domains = obj->read_domains; | 2939 | old_read_domains = obj->read_domains; |
2778 | 2940 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | |
2779 | /* It should now be out of any other write domains, and we can update | ||
2780 | * the domain values for our changes. | ||
2781 | */ | ||
2782 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
2783 | obj->read_domains = I915_GEM_DOMAIN_GTT; | ||
2784 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
2785 | obj_priv->dirty = 1; | ||
2786 | 2941 | ||
2787 | trace_i915_gem_object_change_domain(obj, | 2942 | trace_i915_gem_object_change_domain(obj, |
2788 | old_read_domains, | 2943 | old_read_domains, |
2789 | old_write_domain); | 2944 | obj->write_domain); |
2790 | 2945 | ||
2791 | return 0; | 2946 | return 0; |
2792 | } | 2947 | } |
@@ -2803,12 +2958,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2803 | uint32_t old_write_domain, old_read_domains; | 2958 | uint32_t old_write_domain, old_read_domains; |
2804 | int ret; | 2959 | int ret; |
2805 | 2960 | ||
2806 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2961 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
2807 | if (ret) | ||
2808 | return ret; | ||
2809 | |||
2810 | /* Wait on any GPU rendering and flushing to occur. */ | ||
2811 | ret = i915_gem_object_wait_rendering(obj); | ||
2812 | if (ret != 0) | 2962 | if (ret != 0) |
2813 | return ret; | 2963 | return ret; |
2814 | 2964 | ||
@@ -2819,6 +2969,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2819 | */ | 2969 | */ |
2820 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 2970 | i915_gem_object_set_to_full_cpu_read_domain(obj); |
2821 | 2971 | ||
2972 | if (write) { | ||
2973 | ret = i915_gem_object_wait_rendering(obj, true); | ||
2974 | if (ret) | ||
2975 | return ret; | ||
2976 | } | ||
2977 | |||
2822 | old_write_domain = obj->write_domain; | 2978 | old_write_domain = obj->write_domain; |
2823 | old_read_domains = obj->read_domains; | 2979 | old_read_domains = obj->read_domains; |
2824 | 2980 | ||
@@ -2838,7 +2994,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2838 | * need to be invalidated at next use. | 2994 | * need to be invalidated at next use. |
2839 | */ | 2995 | */ |
2840 | if (write) { | 2996 | if (write) { |
2841 | obj->read_domains &= I915_GEM_DOMAIN_CPU; | 2997 | obj->read_domains = I915_GEM_DOMAIN_CPU; |
2842 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 2998 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
2843 | } | 2999 | } |
2844 | 3000 | ||
@@ -2964,7 +3120,7 @@ static void | |||
2964 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3120 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
2965 | { | 3121 | { |
2966 | struct drm_device *dev = obj->dev; | 3122 | struct drm_device *dev = obj->dev; |
2967 | drm_i915_private_t *dev_priv = dev->dev_private; | 3123 | struct drm_i915_private *dev_priv = dev->dev_private; |
2968 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3124 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2969 | uint32_t invalidate_domains = 0; | 3125 | uint32_t invalidate_domains = 0; |
2970 | uint32_t flush_domains = 0; | 3126 | uint32_t flush_domains = 0; |
@@ -2975,12 +3131,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2975 | 3131 | ||
2976 | intel_mark_busy(dev, obj); | 3132 | intel_mark_busy(dev, obj); |
2977 | 3133 | ||
2978 | #if WATCH_BUF | ||
2979 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | ||
2980 | __func__, obj, | ||
2981 | obj->read_domains, obj->pending_read_domains, | ||
2982 | obj->write_domain, obj->pending_write_domain); | ||
2983 | #endif | ||
2984 | /* | 3134 | /* |
2985 | * If the object isn't moving to a new write domain, | 3135 | * If the object isn't moving to a new write domain, |
2986 | * let the object stay in multiple read domains | 3136 | * let the object stay in multiple read domains |
@@ -3007,13 +3157,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3007 | * stale data. That is, any new read domains. | 3157 | * stale data. That is, any new read domains. |
3008 | */ | 3158 | */ |
3009 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | 3159 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; |
3010 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { | 3160 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) |
3011 | #if WATCH_BUF | ||
3012 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | ||
3013 | __func__, flush_domains, invalidate_domains); | ||
3014 | #endif | ||
3015 | i915_gem_clflush_object(obj); | 3161 | i915_gem_clflush_object(obj); |
3016 | } | ||
3017 | 3162 | ||
3018 | old_read_domains = obj->read_domains; | 3163 | old_read_domains = obj->read_domains; |
3019 | 3164 | ||
@@ -3027,21 +3172,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3027 | obj->pending_write_domain = obj->write_domain; | 3172 | obj->pending_write_domain = obj->write_domain; |
3028 | obj->read_domains = obj->pending_read_domains; | 3173 | obj->read_domains = obj->pending_read_domains; |
3029 | 3174 | ||
3030 | if (flush_domains & I915_GEM_GPU_DOMAINS) { | ||
3031 | if (obj_priv->ring == &dev_priv->render_ring) | ||
3032 | dev_priv->flush_rings |= FLUSH_RENDER_RING; | ||
3033 | else if (obj_priv->ring == &dev_priv->bsd_ring) | ||
3034 | dev_priv->flush_rings |= FLUSH_BSD_RING; | ||
3035 | } | ||
3036 | |||
3037 | dev->invalidate_domains |= invalidate_domains; | 3175 | dev->invalidate_domains |= invalidate_domains; |
3038 | dev->flush_domains |= flush_domains; | 3176 | dev->flush_domains |= flush_domains; |
3039 | #if WATCH_BUF | 3177 | if (obj_priv->ring) |
3040 | DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", | 3178 | dev_priv->mm.flush_rings |= obj_priv->ring->id; |
3041 | __func__, | ||
3042 | obj->read_domains, obj->write_domain, | ||
3043 | dev->invalidate_domains, dev->flush_domains); | ||
3044 | #endif | ||
3045 | 3179 | ||
3046 | trace_i915_gem_object_change_domain(obj, | 3180 | trace_i915_gem_object_change_domain(obj, |
3047 | old_read_domains, | 3181 | old_read_domains, |
@@ -3104,12 +3238,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3104 | if (offset == 0 && size == obj->size) | 3238 | if (offset == 0 && size == obj->size) |
3105 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3239 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3106 | 3240 | ||
3107 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3241 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); |
3108 | if (ret) | ||
3109 | return ret; | ||
3110 | |||
3111 | /* Wait on any GPU rendering and flushing to occur. */ | ||
3112 | ret = i915_gem_object_wait_rendering(obj); | ||
3113 | if (ret != 0) | 3242 | if (ret != 0) |
3114 | return ret; | 3243 | return ret; |
3115 | i915_gem_object_flush_gtt_write_domain(obj); | 3244 | i915_gem_object_flush_gtt_write_domain(obj); |
@@ -3196,11 +3325,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3196 | * properly handle blits to/from tiled surfaces. | 3325 | * properly handle blits to/from tiled surfaces. |
3197 | */ | 3326 | */ |
3198 | if (need_fence) { | 3327 | if (need_fence) { |
3199 | ret = i915_gem_object_get_fence_reg(obj); | 3328 | ret = i915_gem_object_get_fence_reg(obj, true); |
3200 | if (ret != 0) { | 3329 | if (ret != 0) { |
3201 | i915_gem_object_unpin(obj); | 3330 | i915_gem_object_unpin(obj); |
3202 | return ret; | 3331 | return ret; |
3203 | } | 3332 | } |
3333 | |||
3334 | dev_priv->fence_regs[obj_priv->fence_reg].gpu = true; | ||
3204 | } | 3335 | } |
3205 | 3336 | ||
3206 | entry->offset = obj_priv->gtt_offset; | 3337 | entry->offset = obj_priv->gtt_offset; |
@@ -3258,6 +3389,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3258 | (int) reloc->offset, | 3389 | (int) reloc->offset, |
3259 | reloc->read_domains, | 3390 | reloc->read_domains, |
3260 | reloc->write_domain); | 3391 | reloc->write_domain); |
3392 | drm_gem_object_unreference(target_obj); | ||
3393 | i915_gem_object_unpin(obj); | ||
3261 | return -EINVAL; | 3394 | return -EINVAL; |
3262 | } | 3395 | } |
3263 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 3396 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
@@ -3333,7 +3466,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3333 | if (ret != 0) { | 3466 | if (ret != 0) { |
3334 | drm_gem_object_unreference(target_obj); | 3467 | drm_gem_object_unreference(target_obj); |
3335 | i915_gem_object_unpin(obj); | 3468 | i915_gem_object_unpin(obj); |
3336 | return -EINVAL; | 3469 | return ret; |
3337 | } | 3470 | } |
3338 | 3471 | ||
3339 | /* Map the page containing the relocation we're going to | 3472 | /* Map the page containing the relocation we're going to |
@@ -3348,11 +3481,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3348 | (reloc_offset & (PAGE_SIZE - 1))); | 3481 | (reloc_offset & (PAGE_SIZE - 1))); |
3349 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; | 3482 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; |
3350 | 3483 | ||
3351 | #if WATCH_BUF | ||
3352 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | ||
3353 | obj, (unsigned int) reloc->offset, | ||
3354 | readl(reloc_entry), reloc_val); | ||
3355 | #endif | ||
3356 | writel(reloc_val, reloc_entry); | 3484 | writel(reloc_val, reloc_entry); |
3357 | io_mapping_unmap_atomic(reloc_page, KM_USER0); | 3485 | io_mapping_unmap_atomic(reloc_page, KM_USER0); |
3358 | 3486 | ||
@@ -3364,10 +3492,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3364 | drm_gem_object_unreference(target_obj); | 3492 | drm_gem_object_unreference(target_obj); |
3365 | } | 3493 | } |
3366 | 3494 | ||
3367 | #if WATCH_BUF | ||
3368 | if (0) | ||
3369 | i915_gem_dump_object(obj, 128, __func__, ~0); | ||
3370 | #endif | ||
3371 | return 0; | 3495 | return 0; |
3372 | } | 3496 | } |
3373 | 3497 | ||
@@ -3382,28 +3506,48 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3382 | * relatively low latency when blocking on a particular request to finish. | 3506 | * relatively low latency when blocking on a particular request to finish. |
3383 | */ | 3507 | */ |
3384 | static int | 3508 | static int |
3385 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | 3509 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) |
3386 | { | 3510 | { |
3387 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 3511 | struct drm_i915_private *dev_priv = dev->dev_private; |
3388 | int ret = 0; | 3512 | struct drm_i915_file_private *file_priv = file->driver_priv; |
3389 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 3513 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); |
3514 | struct drm_i915_gem_request *request; | ||
3515 | struct intel_ring_buffer *ring = NULL; | ||
3516 | u32 seqno = 0; | ||
3517 | int ret; | ||
3390 | 3518 | ||
3391 | mutex_lock(&dev->struct_mutex); | 3519 | spin_lock(&file_priv->mm.lock); |
3392 | while (!list_empty(&i915_file_priv->mm.request_list)) { | 3520 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
3393 | struct drm_i915_gem_request *request; | ||
3394 | |||
3395 | request = list_first_entry(&i915_file_priv->mm.request_list, | ||
3396 | struct drm_i915_gem_request, | ||
3397 | client_list); | ||
3398 | |||
3399 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 3521 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3400 | break; | 3522 | break; |
3401 | 3523 | ||
3402 | ret = i915_wait_request(dev, request->seqno, request->ring); | 3524 | ring = request->ring; |
3403 | if (ret != 0) | 3525 | seqno = request->seqno; |
3404 | break; | ||
3405 | } | 3526 | } |
3406 | mutex_unlock(&dev->struct_mutex); | 3527 | spin_unlock(&file_priv->mm.lock); |
3528 | |||
3529 | if (seqno == 0) | ||
3530 | return 0; | ||
3531 | |||
3532 | ret = 0; | ||
3533 | if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { | ||
3534 | /* And wait for the seqno passing without holding any locks and | ||
3535 | * causing extra latency for others. This is safe as the irq | ||
3536 | * generation is designed to be run atomically and so is | ||
3537 | * lockless. | ||
3538 | */ | ||
3539 | ring->user_irq_get(dev, ring); | ||
3540 | ret = wait_event_interruptible(ring->irq_queue, | ||
3541 | i915_seqno_passed(ring->get_seqno(dev, ring), seqno) | ||
3542 | || atomic_read(&dev_priv->mm.wedged)); | ||
3543 | ring->user_irq_put(dev, ring); | ||
3544 | |||
3545 | if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) | ||
3546 | ret = -EIO; | ||
3547 | } | ||
3548 | |||
3549 | if (ret == 0) | ||
3550 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | ||
3407 | 3551 | ||
3408 | return ret; | 3552 | return ret; |
3409 | } | 3553 | } |
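The throttle rewrite above is the largest behavioural change in this section: instead of waiting on requests one by one under struct_mutex, it scans the per-file request list under the new file_priv->mm.lock spinlock, picks the most recent request older than 20 ms, and then waits for that seqno without holding any locks. A condensed restatement of the lockless wait it relies on, using the ring vfuncs exactly as shown above:

        /* Lockless seqno wait: safe because seqno generation is atomic
         * and the irq_queue is woken from the interrupt handler.
         */
        static int wait_seqno_unlocked(struct drm_device *dev,
                                       struct intel_ring_buffer *ring,
                                       u32 seqno, atomic_t *wedged)
        {
                int ret;

                ring->user_irq_get(dev, ring);
                ret = wait_event_interruptible(ring->irq_queue,
                        i915_seqno_passed(ring->get_seqno(dev, ring), seqno) ||
                        atomic_read(wedged));
                ring->user_irq_put(dev, ring);

                if (ret == 0 && atomic_read(wedged))
                        ret = -EIO;     /* GPU died while we slept */
                return ret;
        }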
@@ -3539,8 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3539 | return ret; | 3683 | return ret; |
3540 | } | 3684 | } |
3541 | 3685 | ||
3542 | 3686 | static int | |
3543 | int | ||
3544 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3687 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3545 | struct drm_file *file_priv, | 3688 | struct drm_file *file_priv, |
3546 | struct drm_i915_gem_execbuffer2 *args, | 3689 | struct drm_i915_gem_execbuffer2 *args, |
@@ -3552,13 +3695,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3552 | struct drm_i915_gem_object *obj_priv; | 3695 | struct drm_i915_gem_object *obj_priv; |
3553 | struct drm_clip_rect *cliprects = NULL; | 3696 | struct drm_clip_rect *cliprects = NULL; |
3554 | struct drm_i915_gem_relocation_entry *relocs = NULL; | 3697 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3555 | int ret = 0, ret2, i, pinned = 0; | 3698 | struct drm_i915_gem_request *request = NULL; |
3699 | int ret, ret2, i, pinned = 0; | ||
3556 | uint64_t exec_offset; | 3700 | uint64_t exec_offset; |
3557 | uint32_t seqno, flush_domains, reloc_index; | 3701 | uint32_t reloc_index; |
3558 | int pin_tries, flips; | 3702 | int pin_tries, flips; |
3559 | 3703 | ||
3560 | struct intel_ring_buffer *ring = NULL; | 3704 | struct intel_ring_buffer *ring = NULL; |
3561 | 3705 | ||
3706 | ret = i915_gem_check_is_wedged(dev); | ||
3707 | if (ret) | ||
3708 | return ret; | ||
3709 | |||
3562 | #if WATCH_EXEC | 3710 | #if WATCH_EXEC |
3563 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 3711 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", |
3564 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 3712 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); |
@@ -3605,20 +3753,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3605 | } | 3753 | } |
3606 | } | 3754 | } |
3607 | 3755 | ||
3756 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
3757 | if (request == NULL) { | ||
3758 | ret = -ENOMEM; | ||
3759 | goto pre_mutex_err; | ||
3760 | } | ||
3761 | |||
3608 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, | 3762 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, |
3609 | &relocs); | 3763 | &relocs); |
3610 | if (ret != 0) | 3764 | if (ret != 0) |
3611 | goto pre_mutex_err; | 3765 | goto pre_mutex_err; |
3612 | 3766 | ||
3613 | mutex_lock(&dev->struct_mutex); | 3767 | ret = i915_mutex_lock_interruptible(dev); |
3614 | 3768 | if (ret) | |
3615 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3616 | |||
3617 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
3618 | mutex_unlock(&dev->struct_mutex); | ||
3619 | ret = -EIO; | ||
3620 | goto pre_mutex_err; | 3769 | goto pre_mutex_err; |
3621 | } | ||
3622 | 3770 | ||
3623 | if (dev_priv->mm.suspended) { | 3771 | if (dev_priv->mm.suspended) { |
3624 | mutex_unlock(&dev->struct_mutex); | 3772 | mutex_unlock(&dev->struct_mutex); |
@@ -3698,15 +3846,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3698 | pinned+1, args->buffer_count, | 3846 | pinned+1, args->buffer_count, |
3699 | total_size, num_fences, | 3847 | total_size, num_fences, |
3700 | ret); | 3848 | ret); |
3701 | DRM_ERROR("%d objects [%d pinned], " | 3849 | DRM_ERROR("%u objects [%u pinned, %u GTT], " |
3702 | "%d object bytes [%d pinned], " | 3850 | "%zu object bytes [%zu pinned], " |
3703 | "%d/%d gtt bytes\n", | 3851 | "%zu/%zu gtt bytes\n", |

3704 | atomic_read(&dev->object_count), | 3852 | dev_priv->mm.object_count, |
3705 | atomic_read(&dev->pin_count), | 3853 | dev_priv->mm.pin_count, |
3706 | atomic_read(&dev->object_memory), | 3854 | dev_priv->mm.gtt_count, |
3707 | atomic_read(&dev->pin_memory), | 3855 | dev_priv->mm.object_memory, |
3708 | atomic_read(&dev->gtt_memory), | 3856 | dev_priv->mm.pin_memory, |
3709 | dev->gtt_total); | 3857 | dev_priv->mm.gtt_memory, |
3858 | dev_priv->mm.gtt_total); | ||
3710 | } | 3859 | } |
3711 | goto err; | 3860 | goto err; |
3712 | } | 3861 | } |
@@ -3739,15 +3888,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3739 | goto err; | 3888 | goto err; |
3740 | } | 3889 | } |
3741 | 3890 | ||
3742 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3743 | |||
3744 | /* Zero the global flush/invalidate flags. These | 3891 | /* Zero the global flush/invalidate flags. These |
3745 | * will be modified as new domains are computed | 3892 | * will be modified as new domains are computed |
3746 | * for each object | 3893 | * for each object |
3747 | */ | 3894 | */ |
3748 | dev->invalidate_domains = 0; | 3895 | dev->invalidate_domains = 0; |
3749 | dev->flush_domains = 0; | 3896 | dev->flush_domains = 0; |
3750 | dev_priv->flush_rings = 0; | 3897 | dev_priv->mm.flush_rings = 0; |
3751 | 3898 | ||
3752 | for (i = 0; i < args->buffer_count; i++) { | 3899 | for (i = 0; i < args->buffer_count; i++) { |
3753 | struct drm_gem_object *obj = object_list[i]; | 3900 | struct drm_gem_object *obj = object_list[i]; |
@@ -3756,8 +3903,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3756 | i915_gem_object_set_to_gpu_domain(obj); | 3903 | i915_gem_object_set_to_gpu_domain(obj); |
3757 | } | 3904 | } |
3758 | 3905 | ||
3759 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3760 | |||
3761 | if (dev->invalidate_domains | dev->flush_domains) { | 3906 | if (dev->invalidate_domains | dev->flush_domains) { |
3762 | #if WATCH_EXEC | 3907 | #if WATCH_EXEC |
3763 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | 3908 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", |
@@ -3765,17 +3910,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3765 | dev->invalidate_domains, | 3910 | dev->invalidate_domains, |
3766 | dev->flush_domains); | 3911 | dev->flush_domains); |
3767 | #endif | 3912 | #endif |
3768 | i915_gem_flush(dev, | 3913 | i915_gem_flush(dev, file_priv, |
3769 | dev->invalidate_domains, | 3914 | dev->invalidate_domains, |
3770 | dev->flush_domains); | 3915 | dev->flush_domains, |
3771 | if (dev_priv->flush_rings & FLUSH_RENDER_RING) | 3916 | dev_priv->mm.flush_rings); |
3772 | (void)i915_add_request(dev, file_priv, | ||
3773 | dev->flush_domains, | ||
3774 | &dev_priv->render_ring); | ||
3775 | if (dev_priv->flush_rings & FLUSH_BSD_RING) | ||
3776 | (void)i915_add_request(dev, file_priv, | ||
3777 | dev->flush_domains, | ||
3778 | &dev_priv->bsd_ring); | ||
3779 | } | 3917 | } |
3780 | 3918 | ||
3781 | for (i = 0; i < args->buffer_count; i++) { | 3919 | for (i = 0; i < args->buffer_count; i++) { |
@@ -3787,16 +3925,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3787 | if (obj->write_domain) | 3925 | if (obj->write_domain) |
3788 | list_move_tail(&obj_priv->gpu_write_list, | 3926 | list_move_tail(&obj_priv->gpu_write_list, |
3789 | &dev_priv->mm.gpu_write_list); | 3927 | &dev_priv->mm.gpu_write_list); |
3790 | else | ||
3791 | list_del_init(&obj_priv->gpu_write_list); | ||
3792 | 3928 | ||
3793 | trace_i915_gem_object_change_domain(obj, | 3929 | trace_i915_gem_object_change_domain(obj, |
3794 | obj->read_domains, | 3930 | obj->read_domains, |
3795 | old_write_domain); | 3931 | old_write_domain); |
3796 | } | 3932 | } |
3797 | 3933 | ||
3798 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3799 | |||
3800 | #if WATCH_COHERENCY | 3934 | #if WATCH_COHERENCY |
3801 | for (i = 0; i < args->buffer_count; i++) { | 3935 | for (i = 0; i < args->buffer_count; i++) { |
3802 | i915_gem_object_check_coherency(object_list[i], | 3936 | i915_gem_object_check_coherency(object_list[i], |
@@ -3823,33 +3957,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3823 | * Ensure that the commands in the batch buffer are | 3957 | * Ensure that the commands in the batch buffer are |
3824 | * finished before the interrupt fires | 3958 | * finished before the interrupt fires |
3825 | */ | 3959 | */ |
3826 | flush_domains = i915_retire_commands(dev, ring); | 3960 | i915_retire_commands(dev, ring); |
3827 | |||
3828 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3829 | 3961 | ||
3830 | /* | ||
3831 | * Get a seqno representing the execution of the current buffer, | ||
3832 | * which we can wait on. We would like to mitigate these interrupts, | ||
3833 | * likely by only creating seqnos occasionally (so that we have | ||
3834 | * *some* interrupts representing completion of buffers that we can | ||
3835 | * wait on when trying to clear up gtt space). | ||
3836 | */ | ||
3837 | seqno = i915_add_request(dev, file_priv, flush_domains, ring); | ||
3838 | BUG_ON(seqno == 0); | ||
3839 | for (i = 0; i < args->buffer_count; i++) { | 3962 | for (i = 0; i < args->buffer_count; i++) { |
3840 | struct drm_gem_object *obj = object_list[i]; | 3963 | struct drm_gem_object *obj = object_list[i]; |
3841 | obj_priv = to_intel_bo(obj); | 3964 | obj_priv = to_intel_bo(obj); |
3842 | 3965 | ||
3843 | i915_gem_object_move_to_active(obj, seqno, ring); | 3966 | i915_gem_object_move_to_active(obj, ring); |
3844 | #if WATCH_LRU | ||
3845 | DRM_INFO("%s: move to exec list %p\n", __func__, obj); | ||
3846 | #endif | ||
3847 | } | 3967 | } |
3848 | #if WATCH_LRU | ||
3849 | i915_dump_lru(dev, __func__); | ||
3850 | #endif | ||
3851 | 3968 | ||
3852 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3969 | i915_add_request(dev, file_priv, request, ring); |
3970 | request = NULL; | ||
3853 | 3971 | ||
3854 | err: | 3972 | err: |
3855 | for (i = 0; i < pinned; i++) | 3973 | for (i = 0; i < pinned; i++) |
@@ -3882,6 +4000,7 @@ pre_mutex_err: | |||
3882 | 4000 | ||
3883 | drm_free_large(object_list); | 4001 | drm_free_large(object_list); |
3884 | kfree(cliprects); | 4002 | kfree(cliprects); |
4003 | kfree(request); | ||
3885 | 4004 | ||
3886 | return ret; | 4005 | return ret; |
3887 | } | 4006 | } |
@@ -3938,7 +4057,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3938 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | 4057 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; |
3939 | exec2_list[i].alignment = exec_list[i].alignment; | 4058 | exec2_list[i].alignment = exec_list[i].alignment; |
3940 | exec2_list[i].offset = exec_list[i].offset; | 4059 | exec2_list[i].offset = exec_list[i].offset; |
3941 | if (!IS_I965G(dev)) | 4060 | if (INTEL_INFO(dev)->gen < 4) |
3942 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | 4061 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; |
3943 | else | 4062 | else |
3944 | exec2_list[i].flags = 0; | 4063 | exec2_list[i].flags = 0; |
@@ -4035,12 +4154,12 @@ int | |||
4035 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4154 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
4036 | { | 4155 | { |
4037 | struct drm_device *dev = obj->dev; | 4156 | struct drm_device *dev = obj->dev; |
4157 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4038 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4158 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4039 | int ret; | 4159 | int ret; |
4040 | 4160 | ||
4041 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 4161 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); |
4042 | 4162 | WARN_ON(i915_verify_lists(dev)); | |
4043 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
4044 | 4163 | ||
4045 | if (obj_priv->gtt_space != NULL) { | 4164 | if (obj_priv->gtt_space != NULL) { |
4046 | if (alignment == 0) | 4165 | if (alignment == 0) |
@@ -4068,14 +4187,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4068 | * remove it from the inactive list | 4187 | * remove it from the inactive list |
4069 | */ | 4188 | */ |
4070 | if (obj_priv->pin_count == 1) { | 4189 | if (obj_priv->pin_count == 1) { |
4071 | atomic_inc(&dev->pin_count); | 4190 | i915_gem_info_add_pin(dev_priv, obj->size); |
4072 | atomic_add(obj->size, &dev->pin_memory); | 4191 | if (!obj_priv->active) |
4073 | if (!obj_priv->active && | 4192 | list_move_tail(&obj_priv->list, |
4074 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 4193 | &dev_priv->mm.pinned_list); |
4075 | list_del_init(&obj_priv->list); | ||
4076 | } | 4194 | } |
4077 | i915_verify_inactive(dev, __FILE__, __LINE__); | ||
4078 | 4195 | ||
4196 | WARN_ON(i915_verify_lists(dev)); | ||
4079 | return 0; | 4197 | return 0; |
4080 | } | 4198 | } |
4081 | 4199 | ||
@@ -4086,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
4086 | drm_i915_private_t *dev_priv = dev->dev_private; | 4204 | drm_i915_private_t *dev_priv = dev->dev_private; |
4087 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4205 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4088 | 4206 | ||
4089 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4207 | WARN_ON(i915_verify_lists(dev)); |
4090 | obj_priv->pin_count--; | 4208 | obj_priv->pin_count--; |
4091 | BUG_ON(obj_priv->pin_count < 0); | 4209 | BUG_ON(obj_priv->pin_count < 0); |
4092 | BUG_ON(obj_priv->gtt_space == NULL); | 4210 | BUG_ON(obj_priv->gtt_space == NULL); |
@@ -4096,14 +4214,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
4096 | * the inactive list | 4214 | * the inactive list |
4097 | */ | 4215 | */ |
4098 | if (obj_priv->pin_count == 0) { | 4216 | if (obj_priv->pin_count == 0) { |
4099 | if (!obj_priv->active && | 4217 | if (!obj_priv->active) |
4100 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | ||
4101 | list_move_tail(&obj_priv->list, | 4218 | list_move_tail(&obj_priv->list, |
4102 | &dev_priv->mm.inactive_list); | 4219 | &dev_priv->mm.inactive_list); |
4103 | atomic_dec(&dev->pin_count); | 4220 | i915_gem_info_remove_pin(dev_priv, obj->size); |
4104 | atomic_sub(obj->size, &dev->pin_memory); | ||
4105 | } | 4221 | } |
4106 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4222 | WARN_ON(i915_verify_lists(dev)); |
4107 | } | 4223 | } |
4108 | 4224 | ||
4109 | int | 4225 | int |
@@ -4115,17 +4231,20 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4115 | struct drm_i915_gem_object *obj_priv; | 4231 | struct drm_i915_gem_object *obj_priv; |
4116 | int ret; | 4232 | int ret; |
4117 | 4233 | ||
4118 | mutex_lock(&dev->struct_mutex); | ||
4119 | |||
4120 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4234 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4121 | if (obj == NULL) { | 4235 | if (obj == NULL) { |
4122 | DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", | 4236 | DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", |
4123 | args->handle); | 4237 | args->handle); |
4124 | mutex_unlock(&dev->struct_mutex); | ||
4125 | return -ENOENT; | 4238 | return -ENOENT; |
4126 | } | 4239 | } |
4127 | obj_priv = to_intel_bo(obj); | 4240 | obj_priv = to_intel_bo(obj); |
4128 | 4241 | ||
4242 | ret = i915_mutex_lock_interruptible(dev); | ||
4243 | if (ret) { | ||
4244 | drm_gem_object_unreference_unlocked(obj); | ||
4245 | return ret; | ||
4246 | } | ||
4247 | |||
4129 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4248 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
4130 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4249 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4131 | drm_gem_object_unreference(obj); | 4250 | drm_gem_object_unreference(obj); |
@@ -4170,18 +4289,23 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
4170 | struct drm_i915_gem_pin *args = data; | 4289 | struct drm_i915_gem_pin *args = data; |
4171 | struct drm_gem_object *obj; | 4290 | struct drm_gem_object *obj; |
4172 | struct drm_i915_gem_object *obj_priv; | 4291 | struct drm_i915_gem_object *obj_priv; |
4173 | 4292 | int ret; | |
4174 | mutex_lock(&dev->struct_mutex); | ||
4175 | 4293 | ||
4176 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4294 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4177 | if (obj == NULL) { | 4295 | if (obj == NULL) { |
4178 | DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", | 4296 | DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", |
4179 | args->handle); | 4297 | args->handle); |
4180 | mutex_unlock(&dev->struct_mutex); | ||
4181 | return -ENOENT; | 4298 | return -ENOENT; |
4182 | } | 4299 | } |
4183 | 4300 | ||
4184 | obj_priv = to_intel_bo(obj); | 4301 | obj_priv = to_intel_bo(obj); |
4302 | |||
4303 | ret = i915_mutex_lock_interruptible(dev); | ||
4304 | if (ret) { | ||
4305 | drm_gem_object_unreference_unlocked(obj); | ||
4306 | return ret; | ||
4307 | } | ||
4308 | |||
4185 | if (obj_priv->pin_filp != file_priv) { | 4309 | if (obj_priv->pin_filp != file_priv) { |
4186 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4310 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4187 | args->handle); | 4311 | args->handle); |
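The pin, unpin, busy and madvise ioctls all switch to the same prologue in this diff: look up the object first (the lookup does not need struct_mutex), then take the mutex with the interruptible i915_mutex_lock_interruptible(), dropping the reference with the _unlocked variant if a signal aborts the lock. The shared shape, restated around a generic ioctl body:

        static int example_ioctl(struct drm_device *dev, u32 handle,
                                 struct drm_file *file_priv)
        {
                struct drm_gem_object *obj;
                int ret;

                obj = drm_gem_object_lookup(dev, file_priv, handle);
                if (obj == NULL)
                        return -ENOENT;

                ret = i915_mutex_lock_interruptible(dev);
                if (ret) {
                        /* struct_mutex is not held: use the unlocked put */
                        drm_gem_object_unreference_unlocked(obj);
                        return ret;
                }

                /* ... ioctl body ... */

                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }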
@@ -4207,6 +4331,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4207 | struct drm_i915_gem_busy *args = data; | 4331 | struct drm_i915_gem_busy *args = data; |
4208 | struct drm_gem_object *obj; | 4332 | struct drm_gem_object *obj; |
4209 | struct drm_i915_gem_object *obj_priv; | 4333 | struct drm_i915_gem_object *obj_priv; |
4334 | int ret; | ||
4210 | 4335 | ||
4211 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4336 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4212 | if (obj == NULL) { | 4337 | if (obj == NULL) { |
@@ -4215,7 +4340,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4215 | return -ENOENT; | 4340 | return -ENOENT; |
4216 | } | 4341 | } |
4217 | 4342 | ||
4218 | mutex_lock(&dev->struct_mutex); | 4343 | ret = i915_mutex_lock_interruptible(dev); |
4344 | if (ret) { | ||
4345 | drm_gem_object_unreference_unlocked(obj); | ||
4346 | return ret; | ||
4347 | } | ||
4219 | 4348 | ||
4220 | /* Count all active objects as busy, even if they are currently not used | 4349 | /* Count all active objects as busy, even if they are currently not used |
4221 | * by the gpu. Users of this interface expect objects to eventually | 4350 | * by the gpu. Users of this interface expect objects to eventually |
@@ -4230,10 +4359,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4230 | * use this buffer rather sooner than later, so issuing the required | 4359 | * use this buffer rather sooner than later, so issuing the required |
4231 | * flush earlier is beneficial. | 4360 | * flush earlier is beneficial. |
4232 | */ | 4361 | */ |
4233 | if (obj->write_domain) { | 4362 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) |
4234 | i915_gem_flush(dev, 0, obj->write_domain); | 4363 | i915_gem_flush_ring(dev, file_priv, |
4235 | (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); | 4364 | obj_priv->ring, |
4236 | } | 4365 | 0, obj->write_domain); |
4237 | 4366 | ||
4238 | /* Update the active list for the hardware's current position. | 4367 | /* Update the active list for the hardware's current position. |
4239 | * Otherwise this only updates on a delayed timer or when irqs | 4368 | * Otherwise this only updates on a delayed timer or when irqs |
@@ -4264,6 +4393,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4264 | struct drm_i915_gem_madvise *args = data; | 4393 | struct drm_i915_gem_madvise *args = data; |
4265 | struct drm_gem_object *obj; | 4394 | struct drm_gem_object *obj; |
4266 | struct drm_i915_gem_object *obj_priv; | 4395 | struct drm_i915_gem_object *obj_priv; |
4396 | int ret; | ||
4267 | 4397 | ||
4268 | switch (args->madv) { | 4398 | switch (args->madv) { |
4269 | case I915_MADV_DONTNEED: | 4399 | case I915_MADV_DONTNEED: |
@@ -4279,10 +4409,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4279 | args->handle); | 4409 | args->handle); |
4280 | return -ENOENT; | 4410 | return -ENOENT; |
4281 | } | 4411 | } |
4282 | |||
4283 | mutex_lock(&dev->struct_mutex); | ||
4284 | obj_priv = to_intel_bo(obj); | 4412 | obj_priv = to_intel_bo(obj); |
4285 | 4413 | ||
4414 | ret = i915_mutex_lock_interruptible(dev); | ||
4415 | if (ret) { | ||
4416 | drm_gem_object_unreference_unlocked(obj); | ||
4417 | return ret; | ||
4418 | } | ||
4419 | |||
4286 | if (obj_priv->pin_count) { | 4420 | if (obj_priv->pin_count) { |
4287 | drm_gem_object_unreference(obj); | 4421 | drm_gem_object_unreference(obj); |
4288 | mutex_unlock(&dev->struct_mutex); | 4422 | mutex_unlock(&dev->struct_mutex); |
@@ -4310,6 +4444,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4310 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 4444 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, |
4311 | size_t size) | 4445 | size_t size) |
4312 | { | 4446 | { |
4447 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4313 | struct drm_i915_gem_object *obj; | 4448 | struct drm_i915_gem_object *obj; |
4314 | 4449 | ||
4315 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | 4450 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
@@ -4321,6 +4456,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4321 | return NULL; | 4456 | return NULL; |
4322 | } | 4457 | } |
4323 | 4458 | ||
4459 | i915_gem_info_add_obj(dev_priv, size); | ||
4460 | |||
4324 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 4461 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
4325 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 4462 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
4326 | 4463 | ||
@@ -4361,6 +4498,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) | |||
4361 | i915_gem_free_mmap_offset(obj); | 4498 | i915_gem_free_mmap_offset(obj); |
4362 | 4499 | ||
4363 | drm_gem_object_release(obj); | 4500 | drm_gem_object_release(obj); |
4501 | i915_gem_info_remove_obj(dev_priv, obj->size); | ||
4364 | 4502 | ||
4365 | kfree(obj_priv->page_cpu_valid); | 4503 | kfree(obj_priv->page_cpu_valid); |
4366 | kfree(obj_priv->bit_17); | 4504 | kfree(obj_priv->bit_17); |
@@ -4419,7 +4557,7 @@ i915_gem_idle(struct drm_device *dev) | |||
4419 | * And not confound mm.suspended! | 4557 | * And not confound mm.suspended! |
4420 | */ | 4558 | */ |
4421 | dev_priv->mm.suspended = 1; | 4559 | dev_priv->mm.suspended = 1; |
4422 | del_timer(&dev_priv->hangcheck_timer); | 4560 | del_timer_sync(&dev_priv->hangcheck_timer); |
4423 | 4561 | ||
4424 | i915_kernel_lost_context(dev); | 4562 | i915_kernel_lost_context(dev); |
4425 | i915_gem_cleanup_ringbuffer(dev); | 4563 | i915_gem_cleanup_ringbuffer(dev); |
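The del_timer() to del_timer_sync() change in i915_gem_idle() closes a teardown race: plain del_timer() can return while the hangcheck handler is still running on another CPU, which could then touch the ring state being torn down just below. The _sync variant waits for a running handler to finish first. The resulting ordering, restated:

        /* Teardown ordering after the fix: */
        dev_priv->mm.suspended = 1;
        del_timer_sync(&dev_priv->hangcheck_timer); /* handler has finished */

        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);           /* now safe to tear down */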
@@ -4499,28 +4637,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4499 | drm_i915_private_t *dev_priv = dev->dev_private; | 4637 | drm_i915_private_t *dev_priv = dev->dev_private; |
4500 | int ret; | 4638 | int ret; |
4501 | 4639 | ||
4502 | dev_priv->render_ring = render_ring; | ||
4503 | |||
4504 | if (!I915_NEED_GFX_HWS(dev)) { | ||
4505 | dev_priv->render_ring.status_page.page_addr | ||
4506 | = dev_priv->status_page_dmah->vaddr; | ||
4507 | memset(dev_priv->render_ring.status_page.page_addr, | ||
4508 | 0, PAGE_SIZE); | ||
4509 | } | ||
4510 | |||
4511 | if (HAS_PIPE_CONTROL(dev)) { | 4640 | if (HAS_PIPE_CONTROL(dev)) { |
4512 | ret = i915_gem_init_pipe_control(dev); | 4641 | ret = i915_gem_init_pipe_control(dev); |
4513 | if (ret) | 4642 | if (ret) |
4514 | return ret; | 4643 | return ret; |
4515 | } | 4644 | } |
4516 | 4645 | ||
4517 | ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); | 4646 | ret = intel_init_render_ring_buffer(dev); |
4518 | if (ret) | 4647 | if (ret) |
4519 | goto cleanup_pipe_control; | 4648 | goto cleanup_pipe_control; |
4520 | 4649 | ||
4521 | if (HAS_BSD(dev)) { | 4650 | if (HAS_BSD(dev)) { |
4522 | dev_priv->bsd_ring = bsd_ring; | 4651 | ret = intel_init_bsd_ring_buffer(dev); |
4523 | ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); | ||
4524 | if (ret) | 4652 | if (ret) |
4525 | goto cleanup_render_ring; | 4653 | goto cleanup_render_ring; |
4526 | } | 4654 | } |
@@ -4573,11 +4701,8 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4573 | return ret; | 4701 | return ret; |
4574 | } | 4702 | } |
4575 | 4703 | ||
4576 | spin_lock(&dev_priv->mm.active_list_lock); | ||
4577 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | 4704 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); |
4578 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); | 4705 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); |
4579 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4580 | |||
4581 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 4706 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
4582 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 4707 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
4583 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 4708 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); |
@@ -4629,10 +4754,10 @@ i915_gem_load(struct drm_device *dev) | |||
4629 | int i; | 4754 | int i; |
4630 | drm_i915_private_t *dev_priv = dev->dev_private; | 4755 | drm_i915_private_t *dev_priv = dev->dev_private; |
4631 | 4756 | ||
4632 | spin_lock_init(&dev_priv->mm.active_list_lock); | ||
4633 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4757 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4634 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | 4758 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); |
4635 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4759 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4760 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); | ||
4636 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4761 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4637 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 4762 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); |
4638 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | 4763 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); |
@@ -4645,6 +4770,7 @@ i915_gem_load(struct drm_device *dev) | |||
4645 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 4770 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
4646 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 4771 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
4647 | i915_gem_retire_work_handler); | 4772 | i915_gem_retire_work_handler); |
4773 | init_completion(&dev_priv->error_completion); | ||
4648 | spin_lock(&shrink_list_lock); | 4774 | spin_lock(&shrink_list_lock); |
4649 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | 4775 | list_add(&dev_priv->mm.shrink_list, &shrink_list); |
4650 | spin_unlock(&shrink_list_lock); | 4776 | spin_unlock(&shrink_list_lock); |
@@ -4663,21 +4789,30 @@ i915_gem_load(struct drm_device *dev) | |||
4663 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4789 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4664 | dev_priv->fence_reg_start = 3; | 4790 | dev_priv->fence_reg_start = 3; |
4665 | 4791 | ||
4666 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 4792 | if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4667 | dev_priv->num_fence_regs = 16; | 4793 | dev_priv->num_fence_regs = 16; |
4668 | else | 4794 | else |
4669 | dev_priv->num_fence_regs = 8; | 4795 | dev_priv->num_fence_regs = 8; |
4670 | 4796 | ||
4671 | /* Initialize fence registers to zero */ | 4797 | /* Initialize fence registers to zero */ |
4672 | if (IS_I965G(dev)) { | 4798 | switch (INTEL_INFO(dev)->gen) { |
4799 | case 6: | ||
4800 | for (i = 0; i < 16; i++) | ||
4801 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0); | ||
4802 | break; | ||
4803 | case 5: | ||
4804 | case 4: | ||
4673 | for (i = 0; i < 16; i++) | 4805 | for (i = 0; i < 16; i++) |
4674 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | 4806 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); |
4675 | } else { | 4807 | break; |
4676 | for (i = 0; i < 8; i++) | 4808 | case 3: |
4677 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | ||
4678 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 4809 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4679 | for (i = 0; i < 8; i++) | 4810 | for (i = 0; i < 8; i++) |
4680 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | 4811 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); |
4812 | case 2: | ||
4813 | for (i = 0; i < 8; i++) | ||
4814 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | ||
4815 | break; | ||
4681 | } | 4816 | } |
4682 | i915_gem_detect_bit_6_swizzle(dev); | 4817 | i915_gem_detect_bit_6_swizzle(dev); |
4683 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 4818 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
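Note: fence-register clearing now dispatches on INTEL_INFO(dev)->gen rather than per-chipset macros, and gains the Sandybridge (gen6) bank. The missing break under case 3 is deliberate: 945/G33 parts carry eight extra registers at FENCE_REG_945_8 on top of the eight FENCE_REG_830_0 registers common to all gen2/3 parts, so case 3 writes its extras and then falls through into case 2. In table form, as encoded by the switch:

	/* fence bank per generation, as encoded by the switch above:
	 *   gen 6:            16 x 64-bit regs at FENCE_REG_SANDYBRIDGE_0
	 *   gen 4/5:          16 x 64-bit regs at FENCE_REG_965_0
	 *   gen 3 (945/G33):   8 x 32-bit regs at FENCE_REG_945_8, plus
	 *   gen 2/3 (common):  8 x 32-bit regs at FENCE_REG_830_0
	 */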
@@ -4687,8 +4822,8 @@ i915_gem_load(struct drm_device *dev) | |||
4687 | * Create a physically contiguous memory object for this object | 4822 | * Create a physically contiguous memory object for this object |
4688 | * e.g. for cursor + overlay regs | 4823 | * e.g. for cursor + overlay regs |
4689 | */ | 4824 | */ |
4690 | int i915_gem_init_phys_object(struct drm_device *dev, | 4825 | static int i915_gem_init_phys_object(struct drm_device *dev, |
4691 | int id, int size, int align) | 4826 | int id, int size, int align) |
4692 | { | 4827 | { |
4693 | drm_i915_private_t *dev_priv = dev->dev_private; | 4828 | drm_i915_private_t *dev_priv = dev->dev_private; |
4694 | struct drm_i915_gem_phys_object *phys_obj; | 4829 | struct drm_i915_gem_phys_object *phys_obj; |
@@ -4720,7 +4855,7 @@ kfree_obj: | |||
4720 | return ret; | 4855 | return ret; |
4721 | } | 4856 | } |
4722 | 4857 | ||
4723 | void i915_gem_free_phys_object(struct drm_device *dev, int id) | 4858 | static void i915_gem_free_phys_object(struct drm_device *dev, int id) |
4724 | { | 4859 | { |
4725 | drm_i915_private_t *dev_priv = dev->dev_private; | 4860 | drm_i915_private_t *dev_priv = dev->dev_private; |
4726 | struct drm_i915_gem_phys_object *phys_obj; | 4861 | struct drm_i915_gem_phys_object *phys_obj; |
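Note: both phys-object helpers lose external linkage; they appear to be called only from within i915_gem.c. The matching prototype removal from i915_drv.h is assumed to land elsewhere in the series, roughly:

	-int i915_gem_init_phys_object(struct drm_device *dev,
	-			      int id, int size, int align);
	-void i915_gem_free_phys_object(struct drm_device *dev, int id);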
@@ -4865,18 +5000,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4865 | return 0; | 5000 | return 0; |
4866 | } | 5001 | } |
4867 | 5002 | ||
4868 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) | 5003 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
4869 | { | 5004 | { |
4870 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 5005 | struct drm_i915_file_private *file_priv = file->driver_priv; |
4871 | 5006 | ||
4872 | /* Clean up our request list when the client is going away, so that | 5007 | /* Clean up our request list when the client is going away, so that |
4873 | * later retire_requests won't dereference our soon-to-be-gone | 5008 | * later retire_requests won't dereference our soon-to-be-gone |
4874 | * file_priv. | 5009 | * file_priv. |
4875 | */ | 5010 | */ |
4876 | mutex_lock(&dev->struct_mutex); | 5011 | spin_lock(&file_priv->mm.lock); |
4877 | while (!list_empty(&i915_file_priv->mm.request_list)) | 5012 | while (!list_empty(&file_priv->mm.request_list)) { |
4878 | list_del_init(i915_file_priv->mm.request_list.next); | 5013 | struct drm_i915_gem_request *request; |
4879 | mutex_unlock(&dev->struct_mutex); | 5014 | |
5015 | request = list_first_entry(&file_priv->mm.request_list, | ||
5016 | struct drm_i915_gem_request, | ||
5017 | client_list); | ||
5018 | list_del(&request->client_list); | ||
5019 | request->file_priv = NULL; | ||
5020 | } | ||
5021 | spin_unlock(&file_priv->mm.lock); | ||
4880 | } | 5022 | } |
4881 | 5023 | ||
4882 | static int | 5024 | static int |
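Note: the i915_gem_release() rewrite above stops taking struct_mutex and stops half-unlinking requests with list_del_init(). Requests now carry a client_list link and a file_priv back-pointer; teardown walks the per-file list under the new file_priv->mm.lock spinlock and clears the back-pointer so a later retire cannot chase a freed file. The retire path presumably performs the mirror-image unlink; a sketch using the fields visible above (the helper name is an assumption):

	/* assumed retire-side counterpart: detach a completed request
	 * from its client under the same per-file spinlock */
	static void
	i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
	{
		struct drm_i915_file_private *file_priv = request->file_priv;

		if (!file_priv)
			return;	/* client already exited via i915_gem_release() */

		spin_lock(&file_priv->mm.lock);
		list_del(&request->client_list);
		request->file_priv = NULL;
		spin_unlock(&file_priv->mm.lock);
	}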
@@ -4885,12 +5027,10 @@ i915_gpu_is_active(struct drm_device *dev) | |||
4885 | drm_i915_private_t *dev_priv = dev->dev_private; | 5027 | drm_i915_private_t *dev_priv = dev->dev_private; |
4886 | int lists_empty; | 5028 | int lists_empty; |
4887 | 5029 | ||
4888 | spin_lock(&dev_priv->mm.active_list_lock); | ||
4889 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 5030 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
4890 | list_empty(&dev_priv->render_ring.active_list); | 5031 | list_empty(&dev_priv->render_ring.active_list); |
4891 | if (HAS_BSD(dev)) | 5032 | if (HAS_BSD(dev)) |
4892 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); | 5033 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); |
4893 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4894 | 5034 | ||
4895 | return !lists_empty; | 5035 | return !lists_empty; |
4896 | } | 5036 | } |
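Note: same active_list_lock removal as in the entervt hunk. i915_gpu_is_active() is read by the shrinker, which already guards its whole pass with struct_mutex; a caller-side sketch of the assumed pattern:

	/* assumed shrinker context: the lists are read under struct_mutex,
	 * taken via trylock so the shrinker backs off under contention */
	if (!mutex_trylock(&dev->struct_mutex))
		return 0;	/* contended; retry on a later shrink pass */

	active = i915_gpu_is_active(dev);
	/* ... reap inactive objects, maybe kick retirement ... */
	mutex_unlock(&dev->struct_mutex);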