author		Chris Wilson <chris@chris-wilson.co.uk>	2010-10-17 05:01:56 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-20 05:51:50 -0400
commit		9af90d19f8a166694753b3f0558d3a8bcd66c0b5 (patch)
tree		7433b151f076c956717fec54b2e42f14ef6723d8 /drivers
parent		1d7cfea152cae6159aa30ceae38c3eaf13ea083c (diff)
drm/i915: cache the last object lookup during pin_and_relocate()
The most frequent relocation within a batchbuffer is a contiguous sequence
of vertex buffer relocations, for which we can virtually eliminate the
drm_gem_object_lookup() overhead by caching the last handle-to-object
translation.
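
The cache is a one-entry memoization of the previous iteration's lookup:
remember the last handle and the object it resolved to, and only call
drm_gem_object_lookup() when the handle changes. Handle 0 is never a valid
GEM handle, so it doubles as the "empty" sentinel. Below is a minimal
user-space sketch of the pattern; the gem_object type and expensive_lookup()
are illustrative stand-ins for the real object table, not kernel API:

#include <stdint.h>
#include <stdio.h>

struct gem_object { uint32_t handle; };

/* Stand-in for drm_gem_object_lookup(): assume every call is costly
 * (object-table walk plus reference counting). */
static struct gem_object *expensive_lookup(uint32_t handle)
{
	static struct gem_object table[4];
	table[handle & 3].handle = handle;
	return &table[handle & 3];
}

int main(void)
{
	/* A batch typically relocates against the same vertex buffer many
	 * times in a row, so consecutive target handles repeat. */
	uint32_t relocs[] = { 7, 7, 7, 7, 2, 2, 7 };
	struct gem_object *target = NULL;
	uint32_t cached_handle = 0;	/* 0 == nothing cached yet */
	int lookups = 0;

	for (unsigned i = 0; i < sizeof(relocs) / sizeof(relocs[0]); i++) {
		if (relocs[i] != cached_handle) {
			/* Cache miss: resolve the new handle. */
			target = expensive_lookup(relocs[i]);
			cached_handle = relocs[i];
			lookups++;
		}
		/* ... apply the relocation against 'target' ... */
	}

	printf("%d lookups for %zu relocations (last target %u)\n",
	       lookups, sizeof(relocs) / sizeof(relocs[0]), target->handle);
	return 0;
}

For the runs of identical handles common in batchbuffers this degenerates
to a single lookup, which is where the "virtually eliminate" claim comes from.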
In doing so, we refactor the pin-and-relocate retry loop out of
do_execbuffer() into its own helper function, which also improves the
error paths.
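
The shape of the new helper, i915_gem_execbuffer_pin(), is a bounded retry:
pin every object, and if the GTT is full (-ENOSPC), evict the whole aperture
and try exactly once more. Here is a compilable sketch of that control flow,
with hypothetical pin_object()/unpin_object()/evict_everything() stubs in
place of the real i915 calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool gtt_full = true;	/* pretend the aperture starts full */

static int pin_object(int i)
{
	(void)i;
	return gtt_full ? -ENOSPC : 0;
}

static void unpin_object(int i) { (void)i; }

static int evict_everything(void)
{
	gtt_full = false;
	return 0;
}

static int execbuffer_pin(int count)
{
	int ret, i, retry;

	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = pin_object(i);
			if (ret)
				break;
		}

		/* One unwind path for both outcomes: unpin whatever was
		 * pinned, skipping the object that failed. */
		while (i--)
			unpin_object(i);

		if (ret == 0)
			break;
		if (ret != -ENOSPC || retry)
			return ret;	/* hard error, or already retried */

		ret = evict_everything();
		if (ret)
			return ret;
	}

	return 0;
}

int main(void)
{
	printf("execbuffer_pin: %d\n", execbuffer_pin(4));
	return 0;
}

Because every exit funnels through the same unwind, the caller no longer
needs the old "unpin the first N" bookkeeping (the pinned counter that this
patch removes from do_execbuffer).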
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	315
1 file changed, 144 insertions(+), 171 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 34a07fc20513..f6a615ea3025 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2152,6 +2152,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	drm_mm_put_block(obj_priv->gtt_space);
 	obj_priv->gtt_space = NULL;
+	obj_priv->gtt_offset = 0;
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2645,12 +2646,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 search_free:
 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
 					obj->size, alignment, 0);
-	if (free_space != NULL) {
+	if (free_space != NULL)
 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
 						       alignment);
-		if (obj_priv->gtt_space != NULL)
-			obj_priv->gtt_offset = obj_priv->gtt_space->start;
-	}
 	if (obj_priv->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
@@ -2693,7 +2691,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
 					       obj_priv->pages,
 					       obj->size >> PAGE_SHIFT,
-					       obj_priv->gtt_offset,
+					       obj_priv->gtt_space->start,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
 		i915_gem_object_put_pages(obj);
@@ -2718,6 +2716,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	obj_priv->gtt_offset = obj_priv->gtt_space->start;
 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
 
 	return 0;
@@ -3240,74 +3239,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+			     struct drm_file *file_priv,
+			     struct drm_i915_gem_exec_object2 *entry)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	int i, ret;
-	bool need_fence;
-
-	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		     obj_priv->tiling_mode != I915_TILING_NONE;
-
-	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence &&
-	    !i915_gem_object_fence_offset_ok(obj,
-					     obj_priv->tiling_mode)) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			return ret;
-	}
-
-	/* Choose the GTT offset for our buffer and put it there. */
-	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-	if (ret)
-		return ret;
-
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle blits to/from tiled surfaces.
-	 */
-	if (need_fence) {
-		ret = i915_gem_object_get_fence_reg(obj, true);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
+	struct drm_gem_object *target_obj = NULL;
+	uint32_t target_handle = 0;
+	int i, ret = 0;
 
-		dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
-	}
-
-	entry->offset = obj_priv->gtt_offset;
-
-	/* Apply the relocations, using the GTT aperture to avoid cache
-	 * flushing requirements.
-	 */
 	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
 	for (i = 0; i < entry->relocation_count; i++) {
 		struct drm_i915_gem_relocation_entry reloc;
-		struct drm_gem_object *target_obj;
-		struct drm_i915_gem_object *target_obj_priv;
+		uint32_t target_offset;
 
-		ret = __copy_from_user_inatomic(&reloc,
-						user_relocs+i,
-						sizeof(reloc));
-		if (ret) {
-			i915_gem_object_unpin(obj);
-			return -EFAULT;
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc))) {
+			ret = -EFAULT;
+			break;
 		}
 
-		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-						   reloc.target_handle);
-		if (target_obj == NULL) {
-			i915_gem_object_unpin(obj);
-			return -ENOENT;
+		if (reloc.target_handle != target_handle) {
+			drm_gem_object_unreference(target_obj);
+
+			target_obj = drm_gem_object_lookup(dev, file_priv,
+							   reloc.target_handle);
+			if (target_obj == NULL) {
+				ret = -ENOENT;
+				break;
+			}
+
+			target_handle = reloc.target_handle;
 		}
-		target_obj_priv = to_intel_bo(target_obj);
+		target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
 		DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3319,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			 (int) reloc.target_handle,
 			 (int) reloc.read_domains,
 			 (int) reloc.write_domain,
-			 (int) target_obj_priv->gtt_offset,
+			 (int) target_offset,
 			 (int) reloc.presumed_offset,
 			 reloc.delta);
 #endif
@@ -3327,12 +3294,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		/* The target buffer should have appeared before us in the
 		 * exec_object list, so it should have a GTT space bound by now.
 		 */
-		if (target_obj_priv->gtt_space == NULL) {
+		if (target_offset == 0) {
 			DRM_ERROR("No GTT space found for object %d\n",
 				  reloc.target_handle);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		/* Validate that the target is in a valid r/w GPU domain */
@@ -3344,9 +3310,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
 		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
@@ -3357,9 +3322,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
@@ -3370,40 +3334,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.write_domain,
 				  target_obj->pending_write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+		target_obj->pending_write_domain = reloc.write_domain;
 
 		/* If the relocation already has the right value in it, no
 		 * more work needs to be done.
 		 */
-		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
-			drm_gem_object_unreference(target_obj);
+		if (target_offset == reloc.presumed_offset)
 			continue;
-		}
 
 		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->size - 4) {
+		if (reloc.offset > obj->base.size - 4) {
 			DRM_ERROR("Relocation beyond object bounds: "
 				  "obj %p target %d offset %d size %d.\n",
 				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->size);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+				  (int) reloc.offset, (int) obj->base.size);
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.offset & 3) {
 			DRM_ERROR("Relocation not 4-byte aligned: "
 				  "obj %p target %d offset %d.\n",
 				  obj, reloc.target_handle,
 				  (int) reloc.offset);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		/* and points to somewhere within the target object. */
@@ -3412,33 +3371,28 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  "obj %p target %d delta %d size %d.\n",
 				  obj, reloc.target_handle,
 				  (int) reloc.delta, (int) target_obj->size);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
-		reloc.delta += target_obj_priv->gtt_offset;
-		if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
+		reloc.delta += target_offset;
+		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
 			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
 			char *vaddr;
 
-			vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
+			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
 			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
 			kunmap_atomic(vaddr, KM_USER0);
 		} else {
 			uint32_t __iomem *reloc_entry;
 			void __iomem *reloc_page;
-			int ret;
 
-			ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-			if (ret) {
-				drm_gem_object_unreference(target_obj);
-				i915_gem_object_unpin(obj);
-				return ret;
-			}
+			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+			if (ret)
+				break;
 
 			/* Map the page containing the relocation we're going to perform. */
-			reloc.offset += obj_priv->gtt_offset;
+			reloc.offset += obj->gtt_offset;
 			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 							      reloc.offset & PAGE_MASK,
 							      KM_USER0);
@@ -3447,8 +3401,74 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			iowrite32(reloc.delta, reloc_entry);
 			io_mapping_unmap_atomic(reloc_page, KM_USER0);
 		}
+	}
+
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_pin(struct drm_device *dev,
+			struct drm_file *file,
+			struct drm_gem_object **object_list,
+			struct drm_i915_gem_exec_object2 *exec_list,
+			int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i, retry;
+
+	/* attempt to pin all of the buffers into the GTT */
+	for (retry = 0; retry < 2; retry++) {
+		ret = 0;
+		for (i = 0; i < count; i++) {
+			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+			struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+			bool need_fence =
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+
+			/* Check fence reg constraints and rebind if necessary */
+			if (need_fence &&
+			    !i915_gem_object_fence_offset_ok(&obj->base,
+							     obj->tiling_mode)) {
+				ret = i915_gem_object_unbind(&obj->base);
+				if (ret)
+					break;
+			}
+
+			ret = i915_gem_object_pin(&obj->base, entry->alignment);
+			if (ret)
+				break;
+
+			/*
+			 * Pre-965 chips need a fence register set up in order
+			 * to properly handle blits to/from tiled surfaces.
+			 */
+			if (need_fence) {
+				ret = i915_gem_object_get_fence_reg(&obj->base, true);
+				if (ret) {
+					i915_gem_object_unpin(&obj->base);
+					break;
+				}
+
+				dev_priv->fence_regs[obj->fence_reg].gpu = true;
+			}
+
+			entry->offset = obj->gtt_offset;
+		}
 
-		drm_gem_object_unreference(target_obj);
+		while (i--)
+			i915_gem_object_unpin(object_list[i]);
+
+		if (ret == 0)
+			break;
+
+		if (ret != -ENOSPC || retry)
+			return ret;
+
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -3551,7 +3571,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv,
+		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec_list)
 {
@@ -3561,9 +3581,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_request *request = NULL;
-	int ret, i, pinned = 0;
+	int ret, i, flips;
 	uint64_t exec_offset;
-	int pin_tries, flips;
 
 	struct intel_ring_buffer *ring = NULL;
 
@@ -3639,7 +3658,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
-		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+		object_list[i] = drm_gem_object_lookup(dev, file,
 						       exec_list[i].handle);
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3662,63 +3681,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		obj_priv->in_execbuffer = true;
 	}
 
-	/* Pin and relocate */
-	for (pin_tries = 0; ; pin_tries++) {
-		ret = 0;
-
-		for (i = 0; i < args->buffer_count; i++) {
-			object_list[i]->pending_read_domains = 0;
-			object_list[i]->pending_write_domain = 0;
-			ret = i915_gem_object_pin_and_relocate(object_list[i],
-							       file_priv,
-							       &exec_list[i]);
-			if (ret)
-				break;
-			pinned = i + 1;
-		}
-		/* success */
-		if (ret == 0)
-			break;
-
-		/* error other than GTT full, or we've already tried again */
-		if (ret != -ENOSPC || pin_tries >= 1) {
-			if (ret != -ERESTARTSYS) {
-				unsigned long long total_size = 0;
-				int num_fences = 0;
-				for (i = 0; i < args->buffer_count; i++) {
-					obj_priv = to_intel_bo(object_list[i]);
-
-					total_size += object_list[i]->size;
-					num_fences +=
-						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
-						obj_priv->tiling_mode != I915_TILING_NONE;
-				}
-				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
-					  pinned+1, args->buffer_count,
-					  total_size, num_fences,
-					  ret);
-				DRM_ERROR("%u objects [%u pinned, %u GTT], "
-					  "%zu object bytes [%zu pinned], "
-					  "%zu /%zu gtt bytes\n",
-					  dev_priv->mm.object_count,
-					  dev_priv->mm.pin_count,
-					  dev_priv->mm.gtt_count,
-					  dev_priv->mm.object_memory,
-					  dev_priv->mm.pin_memory,
-					  dev_priv->mm.gtt_memory,
-					  dev_priv->mm.gtt_total);
-			}
-			goto err;
-		}
-
-		/* unpin all of our buffers */
-		for (i = 0; i < pinned; i++)
-			i915_gem_object_unpin(object_list[i]);
-		pinned = 0;
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	ret = i915_gem_execbuffer_pin(dev, file,
+				      object_list, exec_list,
+				      args->buffer_count);
+	if (ret)
+		goto err;
 
-		/* evict everyone we can from the aperture */
-		ret = i915_gem_evict_everything(dev);
-		if (ret && ret != -ENOSPC)
+	/* The objects are in their final locations, apply the relocations. */
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+		if (ret)
 			goto err;
 	}
 
@@ -3731,9 +3707,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
-	/* Sanity check the batch buffer, prior to moving objects */
-	exec_offset = exec_list[args->buffer_count - 1].offset;
-	ret = i915_gem_check_execbuffer (args, exec_offset);
+	/* Sanity check the batch buffer */
+	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+	ret = i915_gem_check_execbuffer(args, exec_offset);
 	if (ret != 0) {
 		DRM_ERROR("execbuf with invalid offset/length\n");
 		goto err;
@@ -3761,7 +3737,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			  dev->invalidate_domains,
 			  dev->flush_domains);
 #endif
-	i915_gem_flush(dev, file_priv,
+	i915_gem_flush(dev, file,
 		       dev->invalidate_domains,
 		       dev->flush_domains,
 		       dev_priv->mm.flush_rings);
@@ -3846,13 +3822,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_object_move_to_active(obj, ring);
 	}
 
-	i915_add_request(dev, file_priv, request, ring);
+	i915_add_request(dev, file, request, ring);
 	request = NULL;
 
 err:
-	for (i = 0; i < pinned; i++)
-		i915_gem_object_unpin(object_list[i]);
-
 	for (i = 0; i < args->buffer_count; i++) {
 		if (object_list[i]) {
 			obj_priv = to_intel_bo(object_list[i]);