author	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-08-20 04:23:20 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-08-20 04:59:41 -0400
commit	225067eedf1f4d4065940232c9069fcb255206ee (patch)
tree	953d3e1702dae2add3579bf15f62cfea84a64753
parent	83358c85866ebd2af1229fc9870b93e126690671 (diff)
drm/i915: move functions around
Prep work to make Chris Wilson's unbound tracking patch a bit easier to read.

Alas, I'd have preferred that moving the page allocation retry loop from bind to get_pages would have been a separate patch, too. But that looks like real work ;-)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
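For context on why pure code motion is useful prep work: in C a function must be visible before its call site, so hoisting i915_gem_object_put_pages_gtt() and i915_gem_object_truncate() above i915_gem_object_get_pages_gtt() presumably lets the follow-up unbound-tracking work call these helpers from the get_pages/bind paths without forward declarations. A minimal standalone sketch of that constraint (illustrative only, not kernel code; the function names are hypothetical stand-ins):

#include <stdio.h>

/* Helper defined first, mirroring how this patch hoists
 * i915_gem_object_put_pages_gtt() above its future caller. */
static void put_pages(void)
{
	printf("releasing backing pages\n");
}

/* Defined later, so it can call put_pages() with no forward
 * declaration -- the ordering this patch sets up. */
static int get_pages(void)
{
	put_pages();
	return 0;
}

int main(void)
{
	return get_pages();
}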
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	116
1 file changed, 58 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 051459324826..0f70c2acfefa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1340,6 +1340,64 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+	struct inode *inode;
+
+	/* Our goal here is to return as much of the memory as
+	 * is possible back to the system as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
+	inode = obj->base.filp->f_path.dentry->d_inode;
+	shmem_truncate_range(inode, 0, (loff_t)-1);
+
+	if (obj->base.map_list.map)
+		drm_gem_free_mmap_offset(&obj->base);
+
+	obj->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+	return obj->madv == I915_MADV_DONTNEED;
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	int page_count = obj->base.size / PAGE_SIZE;
+	int i;
+
+	if (!obj->pages)
+		return;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj);
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	for (i = 0; i < page_count; i++) {
+		if (obj->dirty)
+			set_page_dirty(obj->pages[i]);
+
+		if (obj->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(obj->pages[i]);
+
+		page_cache_release(obj->pages[i]);
+	}
+	obj->dirty = 0;
+
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+}
+
 int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 			      gfp_t gfpmask)
@@ -1387,38 +1445,6 @@ err_pages:
 	return PTR_ERR(page);
 }
 
-static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
-{
-	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
-
-	if (!obj->pages)
-		return;
-
-	BUG_ON(obj->madv == __I915_MADV_PURGED);
-
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_save_bit_17_swizzle(obj);
-
-	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
-
-	for (i = 0; i < page_count; i++) {
-		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
-
-		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
-
-		page_cache_release(obj->pages[i]);
-	}
-	obj->dirty = 0;
-
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
-}
-
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring,
@@ -1486,32 +1512,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
-	struct inode *inode;
-
-	/* Our goal here is to return as much of the memory as
-	 * is possible back to the system as we are called from OOM.
-	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*.
-	 */
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	shmem_truncate_range(inode, 0, (loff_t)-1);
-
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
-
-	obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
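A note on the truncate path moved above: shmem_truncate_range(inode, 0, (loff_t)-1) drops every shmem backing page of the object at once, the in-kernel equivalent of truncating the object's backing file to zero length. A rough userspace analogue, as a hypothetical sketch (memfd_create() is shmem-backed much like a GEM object's filp; this is not part of the patch, and memfd_create() postdates it):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* A memfd is shmem-backed, much like a GEM object's filp. */
	int fd = memfd_create("gem-analogue", 0);
	if (fd < 0)
		return 1;
	if (ftruncate(fd, 4096) < 0)	/* allocate one page of backing store */
		return 1;

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0xaa, 4096);	/* dirty the page so it is resident */
	munmap(p, 4096);

	/* Drop all backing pages immediately, analogous to
	 * shmem_truncate_range(inode, 0, (loff_t)-1) in the patch. */
	if (ftruncate(fd, 0) < 0)
		return 1;

	puts("backing store purged");
	close(fd);
	return 0;
}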