about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-11-24 07:23:44 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2010-11-25 10:04:53 -0500
commit6299f992c0491232f008028a1f40bc9d86c4c76c (patch)
tree7c8d2a23fa6a749ffe5a918bb06c4b78d63a0e26 /drivers/gpu
parent2021746e1d5ad1e3b51e24480c566acbb833c7c1 (diff)
drm/i915: Defer accounting until read from debugfs
Simply remove our accounting of objects inside the aperture, keeping track only of what is in the aperture and its current usage. This removes the over-complication of BUGs that were attempting to keep the accounting correct, and also removes the overhead of the accounting on the hot paths. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c84
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c128
3 files changed, 95 insertions, 128 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5faae476954c..3c9d4b876865 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -128,8 +128,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
128 if (obj->gtt_space != NULL) 128 if (obj->gtt_space != NULL)
129 seq_printf(m, " (gtt offset: %08x, size: %08x)", 129 seq_printf(m, " (gtt offset: %08x, size: %08x)",
130 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 130 obj->gtt_offset, (unsigned int)obj->gtt_space->size);
131 if (obj->pin_mappable || obj->fault_mappable) 131 if (obj->pin_mappable || obj->fault_mappable) {
132 seq_printf(m, " (mappable)"); 132 char s[3], *t = s;
133 if (obj->pin_mappable)
134 *t++ = 'p';
135 if (obj->fault_mappable)
136 *t++ = 'f';
137 *t = '\0';
138 seq_printf(m, " (%s mappable)", s);
139 }
133 if (obj->ring != NULL) 140 if (obj->ring != NULL)
134 seq_printf(m, " (%s)", obj->ring->name); 141 seq_printf(m, " (%s)", obj->ring->name);
135} 142}
@@ -191,28 +198,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
191 return 0; 198 return 0;
192} 199}
193 200
201#define count_objects(list, member) do { \
202 list_for_each_entry(obj, list, member) { \
203 size += obj->gtt_space->size; \
204 ++count; \
205 if (obj->map_and_fenceable) { \
206 mappable_size += obj->gtt_space->size; \
207 ++mappable_count; \
208 } \
209 } \
210} while(0)
211
194static int i915_gem_object_info(struct seq_file *m, void* data) 212static int i915_gem_object_info(struct seq_file *m, void* data)
195{ 213{
196 struct drm_info_node *node = (struct drm_info_node *) m->private; 214 struct drm_info_node *node = (struct drm_info_node *) m->private;
197 struct drm_device *dev = node->minor->dev; 215 struct drm_device *dev = node->minor->dev;
198 struct drm_i915_private *dev_priv = dev->dev_private; 216 struct drm_i915_private *dev_priv = dev->dev_private;
217 u32 count, mappable_count;
218 size_t size, mappable_size;
219 struct drm_i915_gem_object *obj;
199 int ret; 220 int ret;
200 221
201 ret = mutex_lock_interruptible(&dev->struct_mutex); 222 ret = mutex_lock_interruptible(&dev->struct_mutex);
202 if (ret) 223 if (ret)
203 return ret; 224 return ret;
204 225
205 seq_printf(m, "%u objects\n", dev_priv->mm.object_count); 226 seq_printf(m, "%u objects, %zu bytes\n",
206 seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); 227 dev_priv->mm.object_count,
207 seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); 228 dev_priv->mm.object_memory);
208 seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); 229
209 seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count); 230 size = count = mappable_size = mappable_count = 0;
210 seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory); 231 count_objects(&dev_priv->mm.gtt_list, gtt_list);
211 seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used); 232 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
212 seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total); 233 count, mappable_count, size, mappable_size);
213 seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); 234
214 seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); 235 size = count = mappable_size = mappable_count = 0;
215 seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); 236 count_objects(&dev_priv->mm.active_list, mm_list);
237 count_objects(&dev_priv->mm.flushing_list, mm_list);
238 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
239 count, mappable_count, size, mappable_size);
240
241 size = count = mappable_size = mappable_count = 0;
242 count_objects(&dev_priv->mm.pinned_list, mm_list);
243 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
244 count, mappable_count, size, mappable_size);
245
246 size = count = mappable_size = mappable_count = 0;
247 count_objects(&dev_priv->mm.inactive_list, mm_list);
248 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
249 count, mappable_count, size, mappable_size);
250
251 size = count = mappable_size = mappable_count = 0;
252 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
253 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
254 count, mappable_count, size, mappable_size);
255
256 size = count = mappable_size = mappable_count = 0;
257 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
258 if (obj->fault_mappable) {
259 size += obj->gtt_space->size;
260 ++count;
261 }
262 if (obj->pin_mappable) {
263 mappable_size += obj->gtt_space->size;
264 ++mappable_count;
265 }
266 }
267 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
268 mappable_count, mappable_size);
269 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
270 count, size);
271
272 seq_printf(m, "%zu [%zu] gtt total\n",
273 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
216 274
217 mutex_unlock(&dev->struct_mutex); 275 mutex_unlock(&dev->struct_mutex);
218 276
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6ca10ade426..4ad34f9c55a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -642,17 +642,10 @@ typedef struct drm_i915_private {
642 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 642 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
643 643
644 /* accounting, useful for userland debugging */ 644 /* accounting, useful for userland debugging */
645 size_t object_memory;
646 size_t pin_memory;
647 size_t gtt_memory;
648 size_t gtt_mappable_memory;
649 size_t mappable_gtt_used;
650 size_t mappable_gtt_total;
651 size_t gtt_total; 645 size_t gtt_total;
646 size_t mappable_gtt_total;
647 size_t object_memory;
652 u32 object_count; 648 u32 object_count;
653 u32 pin_count;
654 u32 gtt_mappable_count;
655 u32 gtt_count;
656 } mm; 649 } mm;
657 struct sdvo_device_mapping sdvo_mappings[2]; 650 struct sdvo_device_mapping sdvo_mappings[2];
658 /* indicate whether the LVDS_BORDER should be enabled or not */ 651 /* indicate whether the LVDS_BORDER should be enabled or not */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6167c55a649..7507a86c3feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -83,80 +83,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 dev_priv->mm.object_memory -= size; 83 dev_priv->mm.object_memory -= size;
84} 84}
85 85
86static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
87 struct drm_i915_gem_object *obj)
88{
89 dev_priv->mm.gtt_count++;
90 dev_priv->mm.gtt_memory += obj->gtt_space->size;
91 if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
92 dev_priv->mm.mappable_gtt_used +=
93 min_t(size_t, obj->gtt_space->size,
94 dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
95 }
96 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
97}
98
99static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
100 struct drm_i915_gem_object *obj)
101{
102 dev_priv->mm.gtt_count--;
103 dev_priv->mm.gtt_memory -= obj->gtt_space->size;
104 if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
105 dev_priv->mm.mappable_gtt_used -=
106 min_t(size_t, obj->gtt_space->size,
107 dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
108 }
109 list_del_init(&obj->gtt_list);
110}
111
112/**
113 * Update the mappable working set counters. Call _only_ when there is a change
114 * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
115 * @mappable: new state the changed mappable flag (either pin_ or fault_).
116 */
117static void
118i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
119 struct drm_i915_gem_object *obj,
120 bool mappable)
121{
122 if (mappable) {
123 if (obj->pin_mappable && obj->fault_mappable)
124 /* Combined state was already mappable. */
125 return;
126 dev_priv->mm.gtt_mappable_count++;
127 dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
128 } else {
129 if (obj->pin_mappable || obj->fault_mappable)
130 /* Combined state still mappable. */
131 return;
132 dev_priv->mm.gtt_mappable_count--;
133 dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
134 }
135}
136
137static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
138 struct drm_i915_gem_object *obj,
139 bool mappable)
140{
141 dev_priv->mm.pin_count++;
142 dev_priv->mm.pin_memory += obj->gtt_space->size;
143 if (mappable) {
144 obj->pin_mappable = true;
145 i915_gem_info_update_mappable(dev_priv, obj, true);
146 }
147}
148
149static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
150 struct drm_i915_gem_object *obj)
151{
152 dev_priv->mm.pin_count--;
153 dev_priv->mm.pin_memory -= obj->gtt_space->size;
154 if (obj->pin_mappable) {
155 obj->pin_mappable = false;
156 i915_gem_info_update_mappable(dev_priv, obj, false);
157 }
158}
159
160int 86int
161i915_gem_check_is_wedged(struct drm_device *dev) 87i915_gem_check_is_wedged(struct drm_device *dev)
162{ 88{
@@ -253,19 +179,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
253{ 179{
254 struct drm_i915_private *dev_priv = dev->dev_private; 180 struct drm_i915_private *dev_priv = dev->dev_private;
255 struct drm_i915_gem_get_aperture *args = data; 181 struct drm_i915_gem_get_aperture *args = data;
182 struct drm_i915_gem_object *obj;
183 size_t pinned;
256 184
257 if (!(dev->driver->driver_features & DRIVER_GEM)) 185 if (!(dev->driver->driver_features & DRIVER_GEM))
258 return -ENODEV; 186 return -ENODEV;
259 187
188 pinned = 0;
260 mutex_lock(&dev->struct_mutex); 189 mutex_lock(&dev->struct_mutex);
261 args->aper_size = dev_priv->mm.gtt_total; 190 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
262 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; 191 pinned += obj->gtt_space->size;
263 mutex_unlock(&dev->struct_mutex); 192 mutex_unlock(&dev->struct_mutex);
264 193
194 args->aper_size = dev_priv->mm.gtt_total;
195 args->aper_available_size = args->aper_size - pinned;
196
265 return 0; 197 return 0;
266} 198}
267 199
268
269/** 200/**
270 * Creates a new mm object and returns a handle to it. 201 * Creates a new mm object and returns a handle to it.
271 */ 202 */
@@ -1267,14 +1198,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1267 1198
1268 /* Now bind it into the GTT if needed */ 1199 /* Now bind it into the GTT if needed */
1269 mutex_lock(&dev->struct_mutex); 1200 mutex_lock(&dev->struct_mutex);
1270 BUG_ON(obj->pin_count && !obj->pin_mappable);
1271 1201
1272 if (!obj->map_and_fenceable) { 1202 if (!obj->map_and_fenceable) {
1273 ret = i915_gem_object_unbind(obj); 1203 ret = i915_gem_object_unbind(obj);
1274 if (ret) 1204 if (ret)
1275 goto unlock; 1205 goto unlock;
1276 } 1206 }
1277
1278 if (!obj->gtt_space) { 1207 if (!obj->gtt_space) {
1279 ret = i915_gem_object_bind_to_gtt(obj, 0, true); 1208 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1280 if (ret) 1209 if (ret)
@@ -1285,11 +1214,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1285 if (ret) 1214 if (ret)
1286 goto unlock; 1215 goto unlock;
1287 1216
1288 if (!obj->fault_mappable) {
1289 obj->fault_mappable = true;
1290 i915_gem_info_update_mappable(dev_priv, obj, true);
1291 }
1292
1293 /* Need a new fence register? */ 1217 /* Need a new fence register? */
1294 if (obj->tiling_mode != I915_TILING_NONE) { 1218 if (obj->tiling_mode != I915_TILING_NONE) {
1295 ret = i915_gem_object_get_fence_reg(obj, true); 1219 ret = i915_gem_object_get_fence_reg(obj, true);
@@ -1300,6 +1224,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1300 if (i915_gem_object_is_inactive(obj)) 1224 if (i915_gem_object_is_inactive(obj))
1301 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1225 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1302 1226
1227 obj->fault_mappable = true;
1228
1303 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + 1229 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1304 page_offset; 1230 page_offset;
1305 1231
@@ -1406,18 +1332,14 @@ out_free_list:
1406void 1332void
1407i915_gem_release_mmap(struct drm_i915_gem_object *obj) 1333i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1408{ 1334{
1409 struct drm_device *dev = obj->base.dev; 1335 if (!obj->fault_mappable)
1410 struct drm_i915_private *dev_priv = dev->dev_private; 1336 return;
1411 1337
1412 if (unlikely(obj->base.map_list.map && dev->dev_mapping)) 1338 unmap_mapping_range(obj->base.dev->dev_mapping,
1413 unmap_mapping_range(dev->dev_mapping, 1339 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1414 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, 1340 obj->base.size, 1);
1415 obj->base.size, 1);
1416 1341
1417 if (obj->fault_mappable) { 1342 obj->fault_mappable = false;
1418 obj->fault_mappable = false;
1419 i915_gem_info_update_mappable(dev_priv, obj, false);
1420 }
1421} 1343}
1422 1344
1423static void 1345static void
@@ -2221,8 +2143,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2221int 2143int
2222i915_gem_object_unbind(struct drm_i915_gem_object *obj) 2144i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2223{ 2145{
2224 struct drm_device *dev = obj->base.dev;
2225 struct drm_i915_private *dev_priv = dev->dev_private;
2226 int ret = 0; 2146 int ret = 0;
2227 2147
2228 if (obj->gtt_space == NULL) 2148 if (obj->gtt_space == NULL)
@@ -2259,10 +2179,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2259 i915_gem_clear_fence_reg(obj); 2179 i915_gem_clear_fence_reg(obj);
2260 2180
2261 i915_gem_gtt_unbind_object(obj); 2181 i915_gem_gtt_unbind_object(obj);
2262
2263 i915_gem_object_put_pages_gtt(obj); 2182 i915_gem_object_put_pages_gtt(obj);
2264 2183
2265 i915_gem_info_remove_gtt(dev_priv, obj); 2184 list_del_init(&obj->gtt_list);
2266 list_del_init(&obj->mm_list); 2185 list_del_init(&obj->mm_list);
2267 /* Avoid an unnecessary call to unbind on rebind. */ 2186 /* Avoid an unnecessary call to unbind on rebind. */
2268 obj->map_and_fenceable = true; 2187 obj->map_and_fenceable = true;
@@ -2833,11 +2752,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2833 goto search_free; 2752 goto search_free;
2834 } 2753 }
2835 2754
2836 obj->gtt_offset = obj->gtt_space->start; 2755 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2837
2838 /* keep track of bounds object by adding it to the inactive list */
2839 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2756 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2840 i915_gem_info_add_gtt(dev_priv, obj);
2841 2757
2842 /* Assert that the object is not currently in any GPU domain. As it 2758 /* Assert that the object is not currently in any GPU domain. As it
2843 * wasn't in the GTT, there shouldn't be any way it could have been in 2759 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2846,7 +2762,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2846 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2762 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2847 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2763 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2848 2764
2849 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); 2765 obj->gtt_offset = obj->gtt_space->start;
2850 2766
2851 fenceable = 2767 fenceable =
2852 obj->gtt_space->size == fence_size && 2768 obj->gtt_space->size == fence_size &&
@@ -2857,6 +2773,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2857 2773
2858 obj->map_and_fenceable = mappable && fenceable; 2774 obj->map_and_fenceable = mappable && fenceable;
2859 2775
2776 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
2860 return 0; 2777 return 0;
2861} 2778}
2862 2779
@@ -4372,12 +4289,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
4372 } 4289 }
4373 4290
4374 if (obj->pin_count++ == 0) { 4291 if (obj->pin_count++ == 0) {
4375 i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
4376 if (!obj->active) 4292 if (!obj->active)
4377 list_move_tail(&obj->mm_list, 4293 list_move_tail(&obj->mm_list,
4378 &dev_priv->mm.pinned_list); 4294 &dev_priv->mm.pinned_list);
4379 } 4295 }
4380 BUG_ON(!obj->pin_mappable && map_and_fenceable); 4296 obj->pin_mappable |= map_and_fenceable;
4381 4297
4382 WARN_ON(i915_verify_lists(dev)); 4298 WARN_ON(i915_verify_lists(dev));
4383 return 0; 4299 return 0;
@@ -4397,7 +4313,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
4397 if (!obj->active) 4313 if (!obj->active)
4398 list_move_tail(&obj->mm_list, 4314 list_move_tail(&obj->mm_list,
4399 &dev_priv->mm.inactive_list); 4315 &dev_priv->mm.inactive_list);
4400 i915_gem_info_remove_pin(dev_priv, obj); 4316 obj->pin_mappable = false;
4401 } 4317 }
4402 WARN_ON(i915_verify_lists(dev)); 4318 WARN_ON(i915_verify_lists(dev));
4403} 4319}