author	Chris Wilson <chris@chris-wilson.co.uk>	2010-08-07 16:45:03 -0400
committer	Eric Anholt <eric@anholt.net>	2010-08-09 14:24:33 -0400
commit	7d1c4804ae98cdee572d7d10d8a5deaa2e686285
tree	c87257e17ac1b052931fab77019503bac2176c27 /drivers
parent	cd377ea93f34cbd6ec49c868b66a5a7ab184775c
drm/i915: Maintain LRU order of inactive objects upon access by CPU (v2)
In order to reduce the penalty of fallbacks under memory pressure and to
avoid a potential immediate ping-pong of evicting a mmapped buffer, we
move the object to the tail of the inactive list when a page is freshly
faulted or the object is moved into the CPU domain. We choose not to
protect the CPU objects from casual eviction, preferring to keep the GPU
active for as long as possible.

v2: Daniel Vetter found a bug where I forgot that pinned objects are
kept off the inactive list.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
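To see the list discipline in isolation, here is a minimal userspace sketch, not the kernel's implementation: struct object and the helpers below are hypothetical stand-ins, with list_move_tail() mirroring the semantics of the kernel helper in <linux/list.h>. Touching an entry moves it to the tail, so the head is always the least-recently-used candidate for an eviction scan.

/*
 * Minimal userspace sketch (not kernel code) of the LRU discipline the
 * patch enforces.  struct object stands in for drm_i915_gem_object.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Unlink, then re-link at the tail: the essence of list_move_tail(). */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add_tail(entry, head);
}

struct object {				/* stand-in for drm_i915_gem_object */
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head inactive_list;
	struct object obj[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct list_head *pos;

	list_init(&inactive_list);
	for (int i = 0; i < 3; i++)
		list_add_tail(&obj[i].list, &inactive_list);

	/* The CPU faults in object 0: it becomes most recently used. */
	list_move_tail(&obj[0].list, &inactive_list);

	/* An eviction scan from the head now sees LRU order: prints 1 2 0 */
	for (pos = inactive_list.next; pos != &inactive_list; pos = pos->next)
		printf("%d ", container_of(pos, struct object, list)->id);
	printf("\n");
	return 0;
}

The same discipline explains the v2 fix: a pinned object is kept off the inactive list entirely, so calling list_move_tail() on it would corrupt whichever list it does sit on. Hence the pin_count == 0 check in the new i915_gem_object_is_inactive() predicate.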
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b5a7b00264a6..8f3e0c10c080 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,6 +57,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+{
+	return obj_priv->gtt_space &&
+		!obj_priv->active &&
+		obj_priv->pin_count == 0;
+}
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
 {
@@ -1036,6 +1044,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+
+	/* Maintain LRU order of "inactive" objects */
+	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1137,6 +1150,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
@@ -1166,6 +1180,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unlock;
 	}
 
+	if (i915_gem_object_is_inactive(obj_priv))
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
 	      page_offset;
 