author		Chris Wilson <chris@chris-wilson.co.uk>	2010-11-21 17:07:12 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-11-23 15:11:43 -0500
commit		bcf50e2775bbc3101932d8e4ab8c7902aa4163b4 (patch)
tree		fa83645166876c378d911a8db8e6433e89a52179 /drivers/gpu
parent		da79de97d254145dcb7c08c978b1093eac15ec9c (diff)
drm/i915: Handle pagefaults in execbuffer user relocations
Currently if we hit a pagefault when applying a user relocation for the
execbuffer, we bail and return EFAULT to the application. Instead, we
need to unwind, drop the dev->struct_mutex, copy all the relocation
entries to a vmalloc array (to avoid any potential circular deadlocks
when resolving the pagefault), retake the mutex and then apply the
relocations. Afterwards, we need to again drop the lock and copy the
vmalloc array back to userspace.
v2: Incorporate feedback from Daniel Vetter.
Reported-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
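
In outline, the recovery strategy reads as below. This is a self-contained user-space sketch of the pattern only, not driver code: the names are hypothetical, a pthread mutex stands in for dev->struct_mutex, a flag simulates the pagefault that __copy_from_user_inatomic would take, and plain malloc() stands in for the vmalloc-backed array (drm_malloc_ab) the patch actually uses. The real implementation is the diff that follows.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A pthread mutex stands in for dev->struct_mutex. */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

struct reloc { unsigned int offset, delta; };

/* Stand-in for __copy_from_user_inatomic(): a non-faulting copy that
 * must fail rather than sleep, because a pagefault serviced here could
 * recurse into a fault handler that also wants big_lock.  The fault is
 * simulated with a flag. */
static int copy_inatomic(struct reloc *dst, const struct reloc *src, int faults)
{
        if (faults)
                return -EFAULT;
        *dst = *src;
        return 0;
}

/* The pattern the patch introduces: try the atomic copy with the lock
 * held; on -EFAULT, drop the lock, snapshot every entry into a
 * kernel-side buffer (where the copy may fault safely), then retake
 * the lock and replay the relocations from that snapshot. */
static int apply_relocations(const struct reloc *user, size_t n, int simulate_fault)
{
        struct reloc tmp, *snap;
        size_t i;

        pthread_mutex_lock(&big_lock);
        for (i = 0; i < n; i++) {
                if (copy_inatomic(&tmp, &user[i], simulate_fault))
                        goto slow;
                /* ... apply tmp to the object here ... */
        }
        pthread_mutex_unlock(&big_lock);
        return 0;

slow:
        pthread_mutex_unlock(&big_lock);        /* unwind: drop the lock */
        snap = malloc(n * sizeof(*snap));       /* the patch uses drm_malloc_ab() */
        if (snap == NULL)
                return -ENOMEM;
        memcpy(snap, user, n * sizeof(*snap));  /* faulting copy is now safe */
        pthread_mutex_lock(&big_lock);          /* retake and replay */
        for (i = 0; i < n; i++)
                tmp = snap[i];                  /* ... apply tmp as above ... */
        pthread_mutex_unlock(&big_lock);
        free(snap);
        return 0;
}

int main(void)
{
        struct reloc r[4] = { {0, 1}, {4, 2}, {8, 3}, {12, 4} };
        printf("fast path: %d\n", apply_relocations(r, 4, 0));
        printf("slow path: %d\n", apply_relocations(r, 4, 1));
        return 0;
}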
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	461
1 files changed, 293 insertions, 168 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bc4164590054..92b097dbe4ff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3254,192 +3254,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         return 0;
 }

-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-                             struct drm_file *file_priv,
-                             struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+                                   struct drm_file *file_priv,
+                                   struct drm_i915_gem_exec_object2 *entry,
+                                   struct drm_i915_gem_relocation_entry *reloc)
 {
         struct drm_device *dev = obj->base.dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_i915_gem_relocation_entry __user *user_relocs;
-        struct drm_gem_object *target_obj = NULL;
-        uint32_t target_handle = 0;
-        int i, ret = 0;
+        struct drm_gem_object *target_obj;
+        uint32_t target_offset;
+        int ret = -EINVAL;

-        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-        for (i = 0; i < entry->relocation_count; i++) {
-                struct drm_i915_gem_relocation_entry reloc;
-                uint32_t target_offset;
+        target_obj = drm_gem_object_lookup(dev, file_priv,
+                                           reloc->target_handle);
+        if (target_obj == NULL)
+                return -ENOENT;

-                if (__copy_from_user_inatomic(&reloc,
-                                              user_relocs+i,
-                                              sizeof(reloc))) {
-                        ret = -EFAULT;
-                        break;
-                }
+        target_offset = to_intel_bo(target_obj)->gtt_offset;

-                if (reloc.target_handle != target_handle) {
-                        drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+        DRM_INFO("%s: obj %p offset %08x target %d "
+                 "read %08x write %08x gtt %08x "
+                 "presumed %08x delta %08x\n",
+                 __func__,
+                 obj,
+                 (int) reloc->offset,
+                 (int) reloc->target_handle,
+                 (int) reloc->read_domains,
+                 (int) reloc->write_domain,
+                 (int) target_offset,
+                 (int) reloc->presumed_offset,
+                 reloc->delta);
+#endif

-                        target_obj = drm_gem_object_lookup(dev, file_priv,
-                                                           reloc.target_handle);
-                        if (target_obj == NULL) {
-                                ret = -ENOENT;
-                                break;
-                        }
+        /* The target buffer should have appeared before us in the
+         * exec_object list, so it should have a GTT space bound by now.
+         */
+        if (target_offset == 0) {
+                DRM_ERROR("No GTT space found for object %d\n",
+                          reloc->target_handle);
+                goto err;
+        }

-                        target_handle = reloc.target_handle;
-                }
-                target_offset = to_intel_bo(target_obj)->gtt_offset;
+        /* Validate that the target is in a valid r/w GPU domain */
+        if (reloc->write_domain & (reloc->write_domain - 1)) {
+                DRM_ERROR("reloc with multiple write domains: "
+                          "obj %p target %d offset %d "
+                          "read %08x write %08x",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->read_domains,
+                          reloc->write_domain);
+                goto err;
+        }
+        if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+            reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+                DRM_ERROR("reloc with read/write CPU domains: "
+                          "obj %p target %d offset %d "
+                          "read %08x write %08x",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->read_domains,
+                          reloc->write_domain);
+                goto err;
+        }
+        if (reloc->write_domain && target_obj->pending_write_domain &&
+            reloc->write_domain != target_obj->pending_write_domain) {
+                DRM_ERROR("Write domain conflict: "
+                          "obj %p target %d offset %d "
+                          "new %08x old %08x\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->write_domain,
+                          target_obj->pending_write_domain);
+                goto err;
+        }

-#if WATCH_RELOC
-                DRM_INFO("%s: obj %p offset %08x target %d "
-                         "read %08x write %08x gtt %08x "
-                         "presumed %08x delta %08x\n",
-                         __func__,
-                         obj,
-                         (int) reloc.offset,
-                         (int) reloc.target_handle,
-                         (int) reloc.read_domains,
-                         (int) reloc.write_domain,
-                         (int) target_offset,
-                         (int) reloc.presumed_offset,
-                         reloc.delta);
-#endif
+        target_obj->pending_read_domains |= reloc->read_domains;
+        target_obj->pending_write_domain |= reloc->write_domain;

-                /* The target buffer should have appeared before us in the
-                 * exec_object list, so it should have a GTT space bound by now.
-                 */
-                if (target_offset == 0) {
-                        DRM_ERROR("No GTT space found for object %d\n",
-                                  reloc.target_handle);
-                        ret = -EINVAL;
-                        break;
-                }
+        /* If the relocation already has the right value in it, no
+         * more work needs to be done.
+         */
+        if (target_offset == reloc->presumed_offset)
+                goto out;

-                /* Validate that the target is in a valid r/w GPU domain */
-                if (reloc.write_domain & (reloc.write_domain - 1)) {
-                        DRM_ERROR("reloc with multiple write domains: "
-                                  "obj %p target %d offset %d "
-                                  "read %08x write %08x",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.read_domains,
-                                  reloc.write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-                    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-                        DRM_ERROR("reloc with read/write CPU domains: "
-                                  "obj %p target %d offset %d "
-                                  "read %08x write %08x",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.read_domains,
-                                  reloc.write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.write_domain && target_obj->pending_write_domain &&
-                    reloc.write_domain != target_obj->pending_write_domain) {
-                        DRM_ERROR("Write domain conflict: "
-                                  "obj %p target %d offset %d "
-                                  "new %08x old %08x\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.write_domain,
-                                  target_obj->pending_write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
+        /* Check that the relocation address is valid... */
+        if (reloc->offset > obj->base.size - 4) {
+                DRM_ERROR("Relocation beyond object bounds: "
+                          "obj %p target %d offset %d size %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          (int) obj->base.size);
+                goto err;
+        }
+        if (reloc->offset & 3) {
+                DRM_ERROR("Relocation not 4-byte aligned: "
+                          "obj %p target %d offset %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset);
+                goto err;
+        }

-                target_obj->pending_read_domains |= reloc.read_domains;
-                target_obj->pending_write_domain |= reloc.write_domain;
+        /* and points to somewhere within the target object. */
+        if (reloc->delta >= target_obj->size) {
+                DRM_ERROR("Relocation beyond target object bounds: "
+                          "obj %p target %d delta %d size %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->delta,
+                          (int) target_obj->size);
+                goto err;
+        }

-                /* If the relocation already has the right value in it, no
-                 * more work needs to be done.
-                 */
-                if (target_offset == reloc.presumed_offset)
-                        continue;
+        reloc->delta += target_offset;
+        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+                char *vaddr;

-                /* Check that the relocation address is valid... */
-                if (reloc.offset > obj->base.size - 4) {
-                        DRM_ERROR("Relocation beyond object bounds: "
-                                  "obj %p target %d offset %d size %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset, (int) obj->base.size);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.offset & 3) {
-                        DRM_ERROR("Relocation not 4-byte aligned: "
-                                  "obj %p target %d offset %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset);
-                        ret = -EINVAL;
-                        break;
-                }
+                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+                kunmap_atomic(vaddr);
+        } else {
+                struct drm_i915_private *dev_priv = dev->dev_private;
+                uint32_t __iomem *reloc_entry;
+                void __iomem *reloc_page;

-                /* and points to somewhere within the target object. */
-                if (reloc.delta >= target_obj->size) {
-                        DRM_ERROR("Relocation beyond target object bounds: "
-                                  "obj %p target %d delta %d size %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.delta, (int) target_obj->size);
-                        ret = -EINVAL;
-                        break;
-                }
+                ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+                if (ret)
+                        goto err;

-                reloc.delta += target_offset;
-                if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-                        uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-                        char *vaddr;
+                /* Map the page containing the relocation we're going to perform. */
+                reloc->offset += obj->gtt_offset;
+                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                                      reloc->offset & PAGE_MASK);
+                reloc_entry = (uint32_t __iomem *)
+                        (reloc_page + (reloc->offset & ~PAGE_MASK));
+                iowrite32(reloc->delta, reloc_entry);
+                io_mapping_unmap_atomic(reloc_page);
+        }

-                        vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-                        *(uint32_t *)(vaddr + page_offset) = reloc.delta;
-                        kunmap_atomic(vaddr);
-                } else {
-                        uint32_t __iomem *reloc_entry;
-                        void __iomem *reloc_page;
+        /* and update the user's relocation entry */
+        reloc->presumed_offset = target_offset;

-                        ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-                        if (ret)
-                                break;
+out:
+        ret = 0;
+err:
+        drm_gem_object_unreference(target_obj);
+        return ret;
+}

-                        /* Map the page containing the relocation we're going to perform. */
-                        reloc.offset += obj->gtt_offset;
-                        reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                              reloc.offset & PAGE_MASK);
-                        reloc_entry = (uint32_t __iomem *)
-                                (reloc_page + (reloc.offset & ~PAGE_MASK));
-                        iowrite32(reloc.delta, reloc_entry);
-                        io_mapping_unmap_atomic(reloc_page);
-                }
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+                                    struct drm_file *file_priv,
+                                    struct drm_i915_gem_exec_object2 *entry)
+{
+        struct drm_i915_gem_relocation_entry __user *user_relocs;
+        int i, ret;
+
+        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+        for (i = 0; i < entry->relocation_count; i++) {
+                struct drm_i915_gem_relocation_entry reloc;
+
+                if (__copy_from_user_inatomic(&reloc,
+                                              user_relocs+i,
+                                              sizeof(reloc)))
+                        return -EFAULT;
+
+                ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+                if (ret)
+                        return ret;

-                /* and update the user's relocation entry */
-                reloc.presumed_offset = target_offset;
                 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
                                             &reloc.presumed_offset,
-                                            sizeof(reloc.presumed_offset))) {
-                        ret = -EFAULT;
-                        break;
-                }
+                                            sizeof(reloc.presumed_offset)))
+                        return -EFAULT;
         }

-        drm_gem_object_unreference(target_obj);
-        return ret;
+        return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+                                         struct drm_file *file_priv,
+                                         struct drm_i915_gem_exec_object2 *entry,
+                                         struct drm_i915_gem_relocation_entry *relocs)
+{
+        int i, ret;
+
+        for (i = 0; i < entry->relocation_count; i++) {
+                ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+                             struct drm_file *file,
+                             struct drm_gem_object **object_list,
+                             struct drm_i915_gem_exec_object2 *exec_list,
+                             int count)
+{
+        int i, ret;
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->base.pending_read_domains = 0;
+                obj->base.pending_write_domain = 0;
+                ret = i915_gem_execbuffer_relocate_object(obj, file,
+                                                          &exec_list[i]);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
 }

 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-                        struct drm_file *file,
-                        struct drm_gem_object **object_list,
-                        struct drm_i915_gem_exec_object2 *exec_list,
-                        int count)
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+                            struct drm_file *file,
+                            struct drm_gem_object **object_list,
+                            struct drm_i915_gem_exec_object2 *exec_list,
+                            int count)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret, i, retry;
@@ -3502,6 +3540,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 }

 static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                                  struct drm_file *file,
+                                  struct drm_gem_object **object_list,
+                                  struct drm_i915_gem_exec_object2 *exec_list,
+                                  int count)
+{
+        struct drm_i915_gem_relocation_entry *reloc;
+        int i, total, ret;
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->in_execbuffer = false;
+        }
+
+        mutex_unlock(&dev->struct_mutex);
+
+        total = 0;
+        for (i = 0; i < count; i++)
+                total += exec_list[i].relocation_count;
+
+        reloc = drm_malloc_ab(total, sizeof(*reloc));
+        if (reloc == NULL) {
+                mutex_lock(&dev->struct_mutex);
+                return -ENOMEM;
+        }
+
+        total = 0;
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+                user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+                if (copy_from_user(reloc+total, user_relocs,
+                                   exec_list[i].relocation_count *
+                                   sizeof(*reloc))) {
+                        ret = -EFAULT;
+                        mutex_lock(&dev->struct_mutex);
+                        goto err;
+                }
+
+                total += exec_list[i].relocation_count;
+        }
+
+        ret = i915_mutex_lock_interruptible(dev);
+        if (ret) {
+                mutex_lock(&dev->struct_mutex);
+                goto err;
+        }
+
+        ret = i915_gem_execbuffer_reserve(dev, file,
+                                          object_list, exec_list,
+                                          count);
+        if (ret)
+                goto err;
+
+        total = 0;
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->base.pending_read_domains = 0;
+                obj->base.pending_write_domain = 0;
+                ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+                                                               &exec_list[i],
+                                                               reloc + total);
+                if (ret)
+                        goto err;
+
+                total += exec_list[i].relocation_count;
+        }
+
+        /* Leave the user relocations as are, this is the painfully slow path,
+         * and we want to avoid the complication of dropping the lock whilst
+         * having buffers reserved in the aperture and so causing spurious
+         * ENOSPC for random operations.
+         */
+
+err:
+        drm_free_large(reloc);
+        return ret;
+}
+
+static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                                 struct drm_file *file,
                                 struct intel_ring_buffer *ring,
@@ -3781,18 +3900,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         }

         /* Move the objects en-masse into the GTT, evicting if necessary. */
-        ret = i915_gem_execbuffer_pin(dev, file,
-                                      object_list, exec_list,
-                                      args->buffer_count);
+        ret = i915_gem_execbuffer_reserve(dev, file,
+                                          object_list, exec_list,
+                                          args->buffer_count);
         if (ret)
                 goto err;

         /* The objects are in their final locations, apply the relocations. */
-        for (i = 0; i < args->buffer_count; i++) {
-                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-                obj->base.pending_read_domains = 0;
-                obj->base.pending_write_domain = 0;
-                ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+        ret = i915_gem_execbuffer_relocate(dev, file,
+                                           object_list, exec_list,
+                                           args->buffer_count);
+        if (ret) {
+                if (ret == -EFAULT) {
+                        ret = i915_gem_execbuffer_relocate_slow(dev, file,
+                                                                object_list,
+                                                                exec_list,
+                                                                args->buffer_count);
+                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+                }
                 if (ret)
                         goto err;
         }