about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/i915_drv.h
diff options
context:
space:
mode:
authorBen Widawsky <ben@bwidawsk.net>2013-07-16 19:50:08 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-17 16:24:32 -0400
commit5cef07e1628300aeda9ac9dae95a2b406175b3ff (patch)
treeddc33253b05f93a526d5f9cf7b9d128abfe701a4 /drivers/gpu/drm/i915/i915_drv.h
parenta7bbbd63e79a89b3e7b77eb734f2773ad69a2a43 (diff)
drm/i915: Move active/inactive lists to new mm
Shamelessly manipulated out of Daniel :-) "When moving the lists around explain that the active/inactive stuff is used by eviction when we run out of address space, so needs to be per-vma and per-address space. Bound/unbound otoh is used by the shrinker which only cares about the amount of memory used and not one bit about in which address space this memory is all used in. Of course to actually kick out an object we need to unbind it from every address space, but for that we have the per-object list of vmas." v2: Leave the bound list as a global one. (Chris, indirectly) v3: Rebased with no i915_gtt_vm. In most places I added a new *vm local, since it will eventually be replaced by a vm argument. Put comment back inline, since it no longer makes sense to do otherwise. v4: Rebased on hangcheck/error state movement Signed-off-by: Ben Widawsky <ben@bwidawsk.net> Reviewed-by: Imre Deak <imre.deak@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h46
1 file changed, 23 insertions, 23 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e1664e8a599..ee21af3a17ac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -458,6 +458,29 @@ struct i915_address_space {
458 struct page *page; 458 struct page *page;
459 } scratch; 459 } scratch;
460 460
461 /**
462 * List of objects currently involved in rendering.
463 *
464 * Includes buffers having the contents of their GPU caches
465 * flushed, not necessarily primitives. last_rendering_seqno
466 * represents when the rendering involved will be completed.
467 *
468 * A reference is held on the buffer while on this list.
469 */
470 struct list_head active_list;
471
472 /**
473 * LRU list of objects which are not in the ringbuffer and
474 * are ready to unbind, but are still in the GTT.
475 *
476 * last_rendering_seqno is 0 while an object is in this list.
477 *
478 * A reference is not held on the buffer while on this list,
479 * as merely being GTT-bound shouldn't prevent its being
480 * freed, and we'll pull it off the list in the free path.
481 */
482 struct list_head inactive_list;
483
461 /* FIXME: Need a more generic return type */ 484 /* FIXME: Need a more generic return type */
462 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 485 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
463 enum i915_cache_level level); 486 enum i915_cache_level level);
@@ -852,29 +875,6 @@ struct i915_gem_mm {
852 struct shrinker inactive_shrinker; 875 struct shrinker inactive_shrinker;
853 bool shrinker_no_lock_stealing; 876 bool shrinker_no_lock_stealing;
854 877
855 /**
856 * List of objects currently involved in rendering.
857 *
858 * Includes buffers having the contents of their GPU caches
859 * flushed, not necessarily primitives. last_rendering_seqno
860 * represents when the rendering involved will be completed.
861 *
862 * A reference is held on the buffer while on this list.
863 */
864 struct list_head active_list;
865
866 /**
867 * LRU list of objects which are not in the ringbuffer and
868 * are ready to unbind, but are still in the GTT.
869 *
870 * last_rendering_seqno is 0 while an object is in this list.
871 *
872 * A reference is not held on the buffer while on this list,
873 * as merely being GTT-bound shouldn't prevent its being
874 * freed, and we'll pull it off the list in the free path.
875 */
876 struct list_head inactive_list;
877
878 /** LRU list of objects with fence regs on them. */ 878 /** LRU list of objects with fence regs on them. */
879 struct list_head fence_list; 879 struct list_head fence_list;
880 880