commit 6299f992c0491232f008028a1f40bc9d86c4c76c
tree 7c8d2a23fa6a749ffe5a918bb06c4b78d63a0e26 /drivers/gpu/drm/i915/i915_gem.c
parent 2021746e1d5ad1e3b51e24480c566acbb833c7c1
author Chris Wilson <chris@chris-wilson.co.uk> 2010-11-24 07:23:44 -0500
committer Chris Wilson <chris@chris-wilson.co.uk> 2010-11-25 10:04:53 -0500
drm/i915: Defer accounting until read from debugfs

Simply remove our accounting of objects inside the aperture, keeping
track only of what is bound into the aperture and its current usage.
This removes the over-complicated BUG_ON checks that tried to keep the
accounting correct, and also removes the overhead of the accounting
itself from the hot paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
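The deleted counters are instead derived on demand: when the statistics
file in debugfs is read, the driver walks the gtt_list and pinned_list
that this patch keeps up to date. That reader lives in the companion
i915_debugfs.c hunk of this commit, which the diffstat below filters
out; what follows is only a minimal sketch of the idea, assuming the
2.6.37-era list and field names used in this patch rather than the
exact upstream code.

/*
 * Sketch only, not part of this patch: recompute the totals by walking
 * the lists the patch maintains (mm.gtt_list, mm.pinned_list) instead
 * of updating counters on every bind/pin.  The in-tree reader is
 * i915_gem_object_info() in i915_debugfs.c.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t gtt_bytes = 0, pinned_bytes = 0;
	u32 gtt_count = 0, pinned_count = 0;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Everything currently bound into the GTT ... */
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		gtt_count++;
		gtt_bytes += obj->gtt_space->size;
	}
	/* ... and the pinned subset that get_aperture also reports. */
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) {
		pinned_count++;
		pinned_bytes += obj->gtt_space->size;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "%u objects, %zu bytes in gtt\n", gtt_count, gtt_bytes);
	seq_printf(m, "%u pinned, %zu bytes\n", pinned_count, pinned_bytes);
	seq_printf(m, "%zu of %zu gtt bytes free\n",
		   dev_priv->mm.gtt_total - gtt_bytes,
		   dev_priv->mm.gtt_total);
	return 0;
}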
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 128 ++++----------------
 1 file changed, 22 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6167c55a649..7507a86c3feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -83,80 +83,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 	dev_priv->mm.object_memory -= size;
 }
 
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-				  struct drm_i915_gem_object *obj)
-{
-	dev_priv->mm.gtt_count++;
-	dev_priv->mm.gtt_memory += obj->gtt_space->size;
-	if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
-		dev_priv->mm.mappable_gtt_used +=
-			min_t(size_t, obj->gtt_space->size,
-			      dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
-	}
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-				     struct drm_i915_gem_object *obj)
-{
-	dev_priv->mm.gtt_count--;
-	dev_priv->mm.gtt_memory -= obj->gtt_space->size;
-	if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
-		dev_priv->mm.mappable_gtt_used -=
-			min_t(size_t, obj->gtt_space->size,
-			      dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
-	}
-	list_del_init(&obj->gtt_list);
-}
-
-/**
- * Update the mappable working set counters. Call _only_ when there is a change
- * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
- * @mappable: new state of the changed mappable flag (either pin_ or fault_).
- */
-static void
-i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
-			      struct drm_i915_gem_object *obj,
-			      bool mappable)
-{
-	if (mappable) {
-		if (obj->pin_mappable && obj->fault_mappable)
-			/* Combined state was already mappable. */
-			return;
-		dev_priv->mm.gtt_mappable_count++;
-		dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
-	} else {
-		if (obj->pin_mappable || obj->fault_mappable)
-			/* Combined state still mappable. */
-			return;
-		dev_priv->mm.gtt_mappable_count--;
-		dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
-	}
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-				  struct drm_i915_gem_object *obj,
-				  bool mappable)
-{
-	dev_priv->mm.pin_count++;
-	dev_priv->mm.pin_memory += obj->gtt_space->size;
-	if (mappable) {
-		obj->pin_mappable = true;
-		i915_gem_info_update_mappable(dev_priv, obj, true);
-	}
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-				     struct drm_i915_gem_object *obj)
-{
-	dev_priv->mm.pin_count--;
-	dev_priv->mm.pin_memory -= obj->gtt_space->size;
-	if (obj->pin_mappable) {
-		obj->pin_mappable = false;
-		i915_gem_info_update_mappable(dev_priv, obj, false);
-	}
-}
-
 int
 i915_gem_check_is_wedged(struct drm_device *dev)
 {
@@ -253,19 +179,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
+	struct drm_i915_gem_object *obj;
+	size_t pinned;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
+	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	args->aper_size = dev_priv->mm.gtt_total;
-	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
 
+	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_available_size = args->aper_size - pinned;
+
 	return 0;
 }
 
-
 /**
  * Creates a new mm object and returns a handle to it.
  */
@@ -1267,14 +1198,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Now bind it into the GTT if needed */
 	mutex_lock(&dev->struct_mutex);
-	BUG_ON(obj->pin_count && !obj->pin_mappable);
 
 	if (!obj->map_and_fenceable) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			goto unlock;
 	}
-
 	if (!obj->gtt_space) {
 		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
@@ -1285,11 +1214,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto unlock;
 
-	if (!obj->fault_mappable) {
-		obj->fault_mappable = true;
-		i915_gem_info_update_mappable(dev_priv, obj, true);
-	}
-
 	/* Need a new fence register? */
 	if (obj->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj, true);
@@ -1300,6 +1224,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (i915_gem_object_is_inactive(obj))
 		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
+	obj->fault_mappable = true;
+
 	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
@@ -1406,18 +1332,14 @@ out_free_list:
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (!obj->fault_mappable)
+		return;
 
-	if (unlikely(obj->base.map_list.map && dev->dev_mapping))
-		unmap_mapping_range(dev->dev_mapping,
-				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
-				    obj->base.size, 1);
+	unmap_mapping_range(obj->base.dev->dev_mapping,
+			    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+			    obj->base.size, 1);
 
-	if (obj->fault_mappable) {
-		obj->fault_mappable = false;
-		i915_gem_info_update_mappable(dev_priv, obj, false);
-	}
+	obj->fault_mappable = false;
 }
 
 static void
@@ -2221,8 +2143,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	if (obj->gtt_space == NULL)
@@ -2259,10 +2179,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	i915_gem_clear_fence_reg(obj);
 
 	i915_gem_gtt_unbind_object(obj);
-
 	i915_gem_object_put_pages_gtt(obj);
 
-	i915_gem_info_remove_gtt(dev_priv, obj);
+	list_del_init(&obj->gtt_list);
 	list_del_init(&obj->mm_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
@@ -2833,11 +2752,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		goto search_free;
 	}
 
-	obj->gtt_offset = obj->gtt_space->start;
-
-	/* keep track of bounds object by adding it to the inactive list */
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
-	i915_gem_info_add_gtt(dev_priv, obj);
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2846,7 +2762,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+	obj->gtt_offset = obj->gtt_space->start;
 
 	fenceable =
 		obj->gtt_space->size == fence_size &&
@@ -2857,6 +2773,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	obj->map_and_fenceable = mappable && fenceable;
 
+	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
 	return 0;
 }
 
@@ -4372,12 +4289,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (obj->pin_count++ == 0) {
-		i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
 		if (!obj->active)
 			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
-	BUG_ON(!obj->pin_mappable && map_and_fenceable);
+	obj->pin_mappable |= map_and_fenceable;
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
@@ -4397,7 +4313,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 		if (!obj->active)
 			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.inactive_list);
-		i915_gem_info_remove_pin(dev_priv, obj);
+		obj->pin_mappable = false;
 	}
 	WARN_ON(i915_verify_lists(dev));
 }
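With the counters gone from bind/unbind and pin/unpin, the only
accounting work left on a request path is the O(pinned) walk in
i915_gem_get_aperture_ioctl() above, taken under struct_mutex; the
pinned list is short (scanout buffers, the ringbuffer and the like), so
the walk is negligible next to the cost of the ioctl itself. The
aggregate numbers stay visible to developers, presumably through the
i915_gem_objects file under debugfs (e.g.
/sys/kernel/debug/dri/0/i915_gem_objects on a typical build).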