author	Chris Wilson <chris@chris-wilson.co.uk>	2009-09-20 18:03:54 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2009-09-22 20:10:36 -0400
commit	963b483691314ed174ceb883f2b9f13b3ef7fb33 (patch)
tree	14547cca5a646c19b5eb670143a90b28761b9462
parent	13a05fd978a110d1efcda4a09e225aa156204ea3 (diff)
drm/i915: Do not mis-classify clean objects as purgeable
Whilst cleaning up the patches for submission, I mis-classified non-dirty
objects as purgeable. This was causing the backing pages for those objects
to be evicted under memory pressure, discarding valid and irreplaceable
texture data.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	55
1 file changed, 26 insertions(+), 29 deletions(-)
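To make the one-line predicate change concrete, here is a standalone C sketch (the struct, constants, and helper names are illustrative stand-ins, not the driver's real types): an object is only safe to truncate when userspace has explicitly marked it DONTNEED, whereas a merely clean (non-dirty) object remains cheap to evict, since its pages can be re-read from the backing store.

/*
 * Illustrative sketch only -- not the driver code.  fake_obj and the
 * FAKE_MADV_* constants are stand-ins invented for this example.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_MADV_WILLNEED 0
#define FAKE_MADV_DONTNEED 1	/* userspace says contents may be discarded */

struct fake_obj {
	bool dirty;	/* written by the GPU since last flushed to backing store */
	int madv;	/* userspace advice */
};

/* Old predicate: a clean object counted as purgeable, so the shrinker
 * would also truncate its backing pages -- the bug this patch fixes. */
static bool is_purgeable_old(const struct fake_obj *o)
{
	return !o->dirty || o->madv == FAKE_MADV_DONTNEED;
}

/* New predicate: only an explicit DONTNEED makes the storage discardable. */
static bool is_purgeable_new(const struct fake_obj *o)
{
	return o->madv == FAKE_MADV_DONTNEED;
}

/* Eviction (unbinding) stays cheap for clean objects, which is why the
 * (!dirty || purgeable) test survives in the inactive-object search. */
static bool is_cheap_to_evict(const struct fake_obj *o)
{
	return !o->dirty || is_purgeable_new(o);
}

int main(void)
{
	struct fake_obj clean_texture = { .dirty = false, .madv = FAKE_MADV_WILLNEED };

	printf("old: purgeable=%d (backing pages would be discarded)\n",
	       is_purgeable_old(&clean_texture));
	printf("new: purgeable=%d, cheap to evict=%d\n",
	       is_purgeable_new(&clean_texture),
	       is_cheap_to_evict(&clean_texture));
	return 0;
}

With the old test, the clean texture above would have had its backing store truncated under memory pressure even though its contents could not be regenerated; with the new test it may still be evicted, but its pages survive.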
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c8ecc75aa79b..c69f1fa38cc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1530,6 +1530,23 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 	obj_priv->last_rendering_seqno = 0;
 }
 
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+	struct inode *inode;
+
+	inode = obj->filp->f_path.dentry->d_inode;
+	if (inode->i_op->truncate)
+		inode->i_op->truncate (inode);
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
@@ -2018,17 +2035,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	if (!list_empty(&obj_priv->list))
 		list_del_init(&obj_priv->list);
 
+	if (i915_gem_object_is_purgeable(obj_priv))
+		i915_gem_object_truncate(obj);
+
 	trace_i915_gem_object_unbind(obj);
 
 	return 0;
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
-{
-	return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
-}
-
 static struct drm_gem_object *
 i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 {
@@ -2041,7 +2055,8 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
 		struct drm_gem_object *obj = obj_priv->obj;
 		if (obj->size >= min_size) {
-			if (i915_gem_object_is_purgeable(obj_priv) &&
+			if ((!obj_priv->dirty ||
+			     i915_gem_object_is_purgeable(obj_priv)) &&
 			    (!best || obj->size < best->size)) {
 				best = obj;
 				if (best->size == min_size)
@@ -4808,19 +4823,6 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
-{
-	struct inode *inode;
-
-	inode = obj->filp->f_path.dentry->d_inode;
-
-	mutex_lock(&inode->i_mutex);
-	truncate_inode_pages(inode->i_mapping, 0);
-	mutex_unlock(&inode->i_mutex);
-}
-
 static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
@@ -4866,10 +4868,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
-				struct drm_gem_object *obj = obj_priv->obj;
-				i915_gem_object_unbind(obj);
-				i915_gem_object_truncate(obj);
-
+				i915_gem_object_unbind(obj_priv->obj);
 				if (--nr_to_scan <= 0)
 					break;
 			}
@@ -4878,6 +4877,8 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 		spin_lock(&shrink_list_lock);
 		mutex_unlock(&dev->struct_mutex);
 
+		would_deadlock = 0;
+
 		if (nr_to_scan <= 0)
 			break;
 	}
@@ -4896,11 +4897,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (nr_to_scan > 0) {
-				struct drm_gem_object *obj = obj_priv->obj;
-				i915_gem_object_unbind(obj);
-				if (i915_gem_object_is_purgeable(obj_priv))
-					i915_gem_object_truncate(obj);
-
+				i915_gem_object_unbind(obj_priv->obj);
 				nr_to_scan--;
 			} else
 				cnt++;