author		Eric Anholt <eric@anholt.net>		2008-11-10 13:53:25 -0500
committer	Dave Airlie <airlied@redhat.com>	2008-12-03 20:21:52 -0500
commit		2ef7eeaa553d88e78d9a4520271f26a7bc0e2968 (patch)
tree		7a46a23d4fcc57ae03131c106d21fdac721dfce1 /drivers/gpu
parent		b670d8158283c35842ae1c650f75c375d8710607 (diff)
drm/i915: Make a single set-to-gtt-domain path.
This fixes failure to flush caches in the relocation update path, and
failure to wait in the set_domain ioctl, each of which could lead to
incorrect rendering.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		115
1 file changed, 96 insertions(+), 19 deletions(-)
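A note for readers of the diff below: GEM tracks, for every buffer object, which caches ("domains") may currently hold its data, as a read_domains bitmask plus at most one write_domain. The flags below are the uapi domain bits this patch deals in, reproduced from the include/drm/i915_drm.h of this era together with the driver-internal GPU-domain mask; treat the exact values as a sketch to be checked against your own tree.

/* Sketch of the GEM memory-domain flags used throughout this patch.
 * Values reproduced from the include/drm/i915_drm.h of this era;
 * verify against your own tree.
 */
#define I915_GEM_DOMAIN_CPU         0x00000001  /* CPU cache */
#define I915_GEM_DOMAIN_RENDER      0x00000002  /* GPU render cache */
#define I915_GEM_DOMAIN_SAMPLER     0x00000004  /* GPU sampler cache */
#define I915_GEM_DOMAIN_COMMAND     0x00000008  /* GPU command queue */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010  /* GPU instruction cache */
#define I915_GEM_DOMAIN_VERTEX      0x00000020  /* GPU vertex cache */
#define I915_GEM_DOMAIN_GTT         0x00000040  /* uncached aperture access */

/* Driver-internal shorthand: every domain except CPU and GTT is a GPU
 * cache whose writes must be flushed by the GPU itself.
 */
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))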
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3fe108b7e2fa..62a059dce85f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -48,6 +48,8 @@ i915_gem_set_domain(struct drm_gem_object *obj,
 			       struct drm_file *file_priv,
 			       uint32_t read_domains,
 			       uint32_t write_domain);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+					      int write);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -260,8 +262,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
-	ret = i915_gem_set_domain(obj, file_priv,
-				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
 		goto fail;
 
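The hunk above is the GTT pwrite fast path: it copies user data into the object through the aperture, so the object must first be moved to the GTT write domain, which the new helper now does (including the flush and wait that the old path could miss). For context, this is how userspace reaches that path; the wrapper is a hypothetical sketch around the real DRM_IOCTL_I915_GEM_PWRITE ioctl and struct.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical wrapper: upload `len` bytes into a GEM object.
 * DRM_IOCTL_I915_GEM_PWRITE and struct drm_i915_gem_pwrite are the
 * real uapi; everything else here is illustrative.
 */
static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
                      const void *data, uint64_t len)
{
        struct drm_i915_gem_pwrite pw = {
                .handle = handle,
                .offset = offset,
                .size = len,
                .data_ptr = (uintptr_t)data,
        };

        /* The kernel now moves the object to the GTT write domain
         * itself (hunk above), so no set_domain call is needed first.
         */
        return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
}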
@@ -397,7 +398,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +407,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain. Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
@@ -417,10 +434,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
 	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-		 obj, obj->size, args->read_domains, args->write_domain);
+		 obj, obj->size, read_domains, write_domain);
 #endif
-	ret = i915_gem_set_domain(obj, file_priv,
-				  args->read_domains, args->write_domain);
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+	} else {
+		ret = i915_gem_set_domain(obj, file_priv,
+					  read_domains, write_domain);
+	}
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
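Taken together, the two hunks above give set_domain_ioctl a checkable contract: only CPU and GTT domains may be named, a non-zero write_domain must equal read_domains, and GTT requests are routed to the new single path. A hypothetical userspace sketch of calls that pass and fail the new validation (the ioctl and struct are the real uapi):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical wrapper around the real set_domain uapi. */
static int gem_set_domain(int fd, uint32_t handle,
                          uint32_t read_domains, uint32_t write_domain)
{
        struct drm_i915_gem_set_domain sd = {
                .handle = handle,
                .read_domains = read_domains,
                .write_domain = write_domain,
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

/* Accepted: a write domain with the same, single read domain. */
/*   gem_set_domain(fd, h, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); */
/* Accepted: read-only access, no write domain. */
/*   gem_set_domain(fd, h, I915_GEM_DOMAIN_CPU, 0); */
/* Rejected, -EINVAL: write domain differs from read domain. */
/*   gem_set_domain(fd, h, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_GTT); */
/* Rejected, -EINVAL: GPU domains are not handled by this ioctl. */
/*   gem_set_domain(fd, h, I915_GEM_DOMAIN_RENDER, 0); */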
@@ -1237,6 +1259,69 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t flush_domains;
+
+	/* Figure out what GPU domains we need to flush or invalidate for
+	 * moving to GTT.
+	 */
+	flush_domains = obj->write_domain & I915_GEM_GPU_DOMAINS;
+
+	/* Queue the GPU write cache flushing we need. */
+	if (flush_domains != 0) {
+		uint32_t seqno;
+
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_flush(dev, 0, flush_domains);
+		seqno = i915_add_request(dev, flush_domains);
+		i915_gem_object_move_to_active(obj, seqno);
+	}
+
+	/* Wait on any GPU rendering and flushing to occur. */
+	if (obj_priv->active) {
+		int ret;
+
+		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		if (ret != 0)
+			return ret;
+	}
+
+	/* If we're writing through the GTT domain, then CPU and GPU caches
+	 * will need to be invalidated at next use.
+	 */
+	if (write)
+		obj->read_domains &= ~(I915_GEM_GPU_DOMAINS |
+				       I915_GEM_DOMAIN_CPU);
+
+	/* Flush the CPU domain if it's dirty. */
+	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+
+		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write)
+		obj->write_domain = I915_GEM_DOMAIN_GTT;
+
+	return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
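The ordering inside the new helper is the substance of the fix: queue a flush of dirty GPU caches, wait for the object to retire, invalidate stale read caches if this is a write, flush dirty CPU cachelines, and only then claim the GTT domain. A minimal self-contained model of that sequence, with all names hypothetical and the driver machinery reduced to stubs:

#include <assert.h>
#include <stdio.h>

/* Hypothetical model of GEM domain bookkeeping; the flag values and
 * the flush/wait stubs stand in for the real driver machinery.
 */
enum {
        DOMAIN_CPU = 1 << 0,
        DOMAIN_GTT = 1 << 1,
        DOMAIN_GPU = 1 << 2,    /* all GPU caches collapsed into one bit */
};

struct object {
        unsigned int read_domains;
        unsigned int write_domain;      /* at most one bit set */
        int active;                     /* has unretired GPU rendering */
};

static void gpu_flush(struct object *obj) { obj->active = 1; }  /* queues a flush */
static void gpu_wait(struct object *obj)  { obj->active = 0; }  /* waits for idle */
static void cpu_clflush(struct object *obj) { (void)obj; }      /* clflush + chipset flush */

/* Mirrors the ordering of i915_gem_object_set_to_gtt_domain() above. */
static void set_to_gtt_domain(struct object *obj, int write)
{
        /* 1. Queue a flush of any GPU write cache holding dirty data. */
        if (obj->write_domain & DOMAIN_GPU) {
                obj->write_domain &= ~DOMAIN_GPU;
                gpu_flush(obj);
        }

        /* 2. Wait for rendering, including the queued flush, to retire. */
        if (obj->active)
                gpu_wait(obj);

        /* 3. A GTT write means other caches go stale at their next use. */
        if (write)
                obj->read_domains &= ~(DOMAIN_GPU | DOMAIN_CPU);

        /* 4. Push dirty CPU cachelines to memory before aperture access. */
        if (obj->write_domain & DOMAIN_CPU) {
                cpu_clflush(obj);
                obj->write_domain &= ~DOMAIN_CPU;
        }

        /* 5. Claim the GTT domain; no other write domain may remain. */
        assert((obj->write_domain & ~DOMAIN_GTT) == 0);
        obj->read_domains |= DOMAIN_GTT;
        if (write)
                obj->write_domain = DOMAIN_GTT;
}

int main(void)
{
        struct object obj = { .read_domains = DOMAIN_CPU,
                              .write_domain = DOMAIN_CPU };

        set_to_gtt_domain(&obj, 1);     /* CPU-dirty object, GTT write */
        printf("read=%#x write=%#x\n", obj.read_domains, obj.write_domain);
        return 0;
}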
@@ -1634,19 +1719,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			continue;
 		}
 
-		/* Now that we're going to actually write some data in,
-		 * make sure that any rendering using this buffer's contents
-		 * is completed.
-		 */
-		i915_gem_object_wait_rendering(obj);
-
-		/* As we're writing through the gtt, flush
-		 * any CPU writes before we write the relocations
-		 */
-		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-			i915_gem_clflush_object(obj);
-			drm_agp_chipset_flush(dev);
-			obj->write_domain = 0;
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
 		}
 
 		/* Map the page containing the relocation we're going to
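For context on the relocation path simplified above: execbuffer patches each relocation by writing the target's final offset into the buffer through the GTT, which is why the object is moved to the GTT write domain (write = 1) before the write. The sketch below shows how userspace describes one such relocation; the struct is the real uapi, while the helper and the chosen values are illustrative.

#include <stdint.h>
#include <drm/i915_drm.h>

/* Illustrative: how userspace describes one relocation for this path.
 * struct drm_i915_gem_relocation_entry is the real uapi; the helper
 * and the chosen domain values are examples only.
 */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
                       uint32_t target_handle, uint64_t patch_offset)
{
        reloc->target_handle = target_handle;   /* object being pointed at */
        reloc->offset = patch_offset;           /* where in this object to patch */
        reloc->delta = 0;                       /* byte offset inside the target */
        reloc->presumed_offset = 0;             /* guess; kernel corrects it */
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = 0;
}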