Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 99
 1 file changed, 55 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 140bee142fc2..80e5ba490dc2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -978,6 +978,7 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_set_domain *args = data;
         struct drm_gem_object *obj;
         uint32_t read_domains = args->read_domains;
@@ -1010,8 +1011,18 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                  obj, obj->size, read_domains, write_domain);
 #endif
         if (read_domains & I915_GEM_DOMAIN_GTT) {
+                struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+                /* Update the LRU on the fence for the CPU access that's
+                 * about to occur.
+                 */
+                if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                        list_move_tail(&obj_priv->fence_list,
+                                       &dev_priv->mm.fence_list);
+                }
+
                 /* Silently promote "you're not bound, there was nothing to do"
                  * to success, since the client was just asking us to
                  * make sure everything was done.
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         /* Need a new fence register? */
-        if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-            obj_priv->tiling_mode != I915_TILING_NONE) {
+        if (obj_priv->tiling_mode != I915_TILING_NONE) {
                 ret = i915_gem_object_get_fence_reg(obj);
                 if (ret) {
                         mutex_unlock(&dev->struct_mutex);
@@ -2208,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
         struct drm_i915_gem_object *old_obj_priv = NULL;
         int i, ret, avail;
 
+        /* Just update our place in the LRU if our fence is getting used. */
+        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+                return 0;
+        }
+
         switch (obj_priv->tiling_mode) {
         case I915_TILING_NONE:
                 WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2229,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
         }
 
         /* First try to find a free reg */
-try_again:
         avail = 0;
         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
                 reg = &dev_priv->fence_regs[i];
@@ -2243,63 +2258,62 @@ try_again:
 
         /* None available, try to steal one or wait for a user to finish */
         if (i == dev_priv->num_fence_regs) {
-                uint32_t seqno = dev_priv->mm.next_gem_seqno;
+                struct drm_gem_object *old_obj = NULL;
 
                 if (avail == 0)
                         return -ENOSPC;
 
-                for (i = dev_priv->fence_reg_start;
-                     i < dev_priv->num_fence_regs; i++) {
-                        uint32_t this_seqno;
-
-                        reg = &dev_priv->fence_regs[i];
-                        old_obj_priv = reg->obj->driver_private;
+                list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+                                    fence_list) {
+                        old_obj = old_obj_priv->obj;
 
                         if (old_obj_priv->pin_count)
                                 continue;
 
+                        /* Take a reference, as otherwise the wait_rendering
+                         * below may cause the object to get freed out from
+                         * under us.
+                         */
+                        drm_gem_object_reference(old_obj);
+
                         /* i915 uses fences for GPU access to tiled buffers */
                         if (IS_I965G(dev) || !old_obj_priv->active)
                                 break;
 
-                        /* find the seqno of the first available fence */
-                        this_seqno = old_obj_priv->last_rendering_seqno;
-                        if (this_seqno != 0 &&
-                            reg->obj->write_domain == 0 &&
-                            i915_seqno_passed(seqno, this_seqno))
-                                seqno = this_seqno;
-                }
-
-                /*
-                 * Now things get ugly... we have to wait for one of the
-                 * objects to finish before trying again.
-                 */
-                if (i == dev_priv->num_fence_regs) {
-                        if (seqno == dev_priv->mm.next_gem_seqno) {
-                                i915_gem_flush(dev,
-                                               I915_GEM_GPU_DOMAINS,
-                                               I915_GEM_GPU_DOMAINS);
-                                seqno = i915_add_request(dev, NULL,
-                                                         I915_GEM_GPU_DOMAINS);
-                                if (seqno == 0)
-                                        return -ENOMEM;
-                        }
+                        /* This brings the object to the head of the LRU if it
+                         * had been written to. The only way this should
+                         * result in us waiting longer than the expected
+                         * optimal amount of time is if there was a
+                         * fence-using buffer later that was read-only.
+                         */
+                        i915_gem_object_flush_gpu_write_domain(old_obj);
+                        ret = i915_gem_object_wait_rendering(old_obj);
+                        if (ret != 0) {
+                                drm_gem_object_unreference(old_obj);
+                                return ret;
+                        }
 
-                        ret = i915_wait_request(dev, seqno);
-                        if (ret)
-                                return ret;
-                        goto try_again;
+                        break;
                 }
 
                 /*
                  * Zap this virtual mapping so we can set up a fence again
                  * for this object next time we need it.
                  */
-                i915_gem_release_mmap(reg->obj);
+                i915_gem_release_mmap(old_obj);
+
+                i = old_obj_priv->fence_reg;
+                reg = &dev_priv->fence_regs[i];
+
                 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+                list_del_init(&old_obj_priv->fence_list);
+
+                drm_gem_object_unreference(old_obj);
         }
 
         obj_priv->fence_reg = i;
+        list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
         reg->obj = obj;
 
         if (IS_I965G(dev))
@@ -2342,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
         dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
         obj_priv->fence_reg = I915_FENCE_REG_NONE;
+        list_del_init(&obj_priv->fence_list);
 }
 
 /**
@@ -3595,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
          * Pre-965 chips need a fence register set up in order to
          * properly handle tiled surfaces.
          */
-        if (!IS_I965G(dev) &&
-            obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-            obj_priv->tiling_mode != I915_TILING_NONE) {
+        if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
                 ret = i915_gem_object_get_fence_reg(obj);
                 if (ret != 0) {
                         if (ret != -EBUSY && ret != -ERESTARTSYS)
@@ -3806,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
         obj_priv->obj = obj;
         obj_priv->fence_reg = I915_FENCE_REG_NONE;
         INIT_LIST_HEAD(&obj_priv->list);
+        INIT_LIST_HEAD(&obj_priv->fence_list);
 
         return 0;
 }
@@ -4218,15 +4232,11 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
-        int ret;
-
         if (drm_core_check_feature(dev, DRIVER_MODESET))
                 return 0;
 
-        ret = i915_gem_idle(dev);
         drm_irq_uninstall(dev);
-
-        return ret;
+        return i915_gem_idle(dev);
 }
 
 void
@@ -4253,6 +4263,7 @@ i915_gem_load(struct drm_device *dev)
         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
         INIT_LIST_HEAD(&dev_priv->mm.request_list);
+        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                           i915_gem_retire_work_handler);
         dev_priv->mm.next_gem_seqno = 1;
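
The pattern this patch adopts -- move a fence's entry to the tail of dev_priv->mm.fence_list each time it is used, and steal from the head when no register is free -- is a classic intrusive-list LRU. Below is a minimal, self-contained userspace sketch of that discipline, not kernel code: it reimplements just enough of list_add_tail()/list_move_tail() to show why the head of the list is always the preferred eviction victim and why pinned entries are skipped, mirroring the shape of the list_for_each_entry() loop in i915_gem_object_get_fence_reg(). All names in the sketch are illustrative; the real code additionally takes a reference on the victim before sleeping in i915_gem_object_wait_rendering() so the object cannot be freed underneath it.

/* lru_sketch.c -- illustrative LRU eviction over an intrusive list. */
#include <stdio.h>
#include <stddef.h>

struct list_head {
        struct list_head *prev, *next;
};

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/* Insert e just before the sentinel, i.e. at the most-recently-used tail. */
static void list_add_tail(struct list_head *e, struct list_head *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

/* On every use, an entry moves to the tail, so the head stays the LRU. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
        list_del(e);
        list_add_tail(e, h);
}

struct fence_entry {
        struct list_head lru;   /* like obj_priv->fence_list */
        int pin_count;          /* like old_obj_priv->pin_count */
        const char *name;
};

/* Evict the least-recently-used unpinned entry, or NULL if all are pinned. */
static struct fence_entry *evict_lru(struct list_head *lru)
{
        struct list_head *pos;

        for (pos = lru->next; pos != lru; pos = pos->next) {
                struct fence_entry *e = (struct fence_entry *)
                        ((char *)pos - offsetof(struct fence_entry, lru));
                if (e->pin_count)
                        continue;       /* pinned fences are never stolen */
                list_del(pos);
                return e;
        }
        return NULL;
}

int main(void)
{
        struct list_head lru;
        struct fence_entry a = { .pin_count = 1, .name = "A" };
        struct fence_entry b = { .pin_count = 0, .name = "B" };
        struct fence_entry c = { .pin_count = 0, .name = "C" };

        list_init(&lru);
        list_add_tail(&a.lru, &lru);
        list_add_tail(&b.lru, &lru);
        list_add_tail(&c.lru, &lru);

        list_move_tail(&b.lru, &lru);   /* B was just used */

        /* A is oldest but pinned, so C is the victim. */
        printf("evicted %s\n", evict_lru(&lru)->name);
        return 0;
}

Compiled with a plain cc lru_sketch.c, this prints "evicted C": A is older but pinned, so the scan skips it, exactly as the fence-stealing loop skips pinned objects and falls through to the oldest stealable fence.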