author	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-14 07:44:48 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-19 04:19:18 -0400
commit	f0c43d9b7ec1bb9827b3dd5ac5915d22ceed8f6a (patch)
tree	4dc15b6745685c3d1c7ad380fa020c6d6c316576 /drivers
parent	2549d6c26ce1c85a76990b972a2c7e8f440455cd (diff)
drm/i915: Perform relocations in CPU domain [if in CPU domain]
Avoid an early eviction of the batch buffer into the uncached GTT
domain, and so do the relocation fixup in cacheable memory.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
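For reference, a minimal userspace sketch of the page/offset arithmetic the new CPU-domain path below relies on. The 4 KiB page size, the pages[] array, and the relocate_cpu() helper are illustrative assumptions, not driver code; in the kernel the backing page comes from obj_priv->pages[] via kmap_atomic(), and the point of the patch is that this cacheable store replaces an uncached write through the GTT.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Assumed 4 KiB pages; PAGE_MASK defined the kernel way. */
	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	static char *pages[4];	/* stand-in for obj_priv->pages[] */

	/* Same split the patch performs: page index via PAGE_SHIFT,
	 * byte offset within the page via ~PAGE_MASK, then a plain
	 * cacheable 32-bit store of the presumed target address. */
	static void relocate_cpu(uint64_t offset, uint32_t delta)
	{
		char *vaddr = pages[offset >> PAGE_SHIFT];	/* kmap_atomic() in the driver */
		uint32_t page_offset = offset & ~PAGE_MASK;

		*(uint32_t *)(vaddr + page_offset) = delta;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			pages[i] = calloc(1, PAGE_SIZE);

		relocate_cpu(0x1008, 0xdeadbeef);	/* lands in page 1, byte offset 8 */
		printf("0x%x\n", *(uint32_t *)(pages[1] + 8));
		return 0;
	}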
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	55
1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 67998e8a2d70..32ff571672b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3124,9 +3124,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	uint32_t flush_domains = 0;
 	uint32_t old_read_domains;
 
-	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
 	intel_mark_busy(dev, obj);
 
 	/*
@@ -3298,7 +3295,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
 	int i, ret;
-	void __iomem *reloc_page;
 	bool need_fence;
 
 	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -3342,8 +3338,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		struct drm_i915_gem_relocation_entry reloc;
 		struct drm_gem_object *target_obj;
 		struct drm_i915_gem_object *target_obj_priv;
-		uint32_t reloc_val, reloc_offset;
-		uint32_t __iomem *reloc_entry;
 
 		ret = __copy_from_user_inatomic(&reloc,
 						user_relocs+i,
@@ -3469,27 +3463,36 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			return -EINVAL;
 		}
 
-		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-		if (ret != 0) {
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
+		reloc.delta += target_obj_priv->gtt_offset;
+		if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
+			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+			char *vaddr;
 
-		/* Map the page containing the relocation we're going to
-		 * perform.
-		 */
-		reloc_offset = obj_priv->gtt_offset + reloc.offset;
-		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-						      (reloc_offset &
-						       ~(PAGE_SIZE - 1)),
-						      KM_USER0);
-		reloc_entry = (uint32_t __iomem *)(reloc_page +
-						   (reloc_offset & (PAGE_SIZE - 1)));
-		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
-
-		writel(reloc_val, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page, KM_USER0);
+			vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
+			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
+			kunmap_atomic(vaddr, KM_USER0);
+		} else {
+			uint32_t __iomem *reloc_entry;
+			void __iomem *reloc_page;
+			int ret;
+
+			ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+			if (ret) {
+				drm_gem_object_unreference(target_obj);
+				i915_gem_object_unpin(obj);
+				return ret;
+			}
+
+			/* Map the page containing the relocation we're going to perform. */
+			reloc.offset += obj_priv->gtt_offset;
+			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+							      reloc.offset & PAGE_MASK,
+							      KM_USER0);
+			reloc_entry = (uint32_t __iomem *)
+				(reloc_page + (reloc.offset & ~PAGE_MASK));
+			iowrite32(reloc.delta, reloc_entry);
+			io_mapping_unmap_atomic(reloc_page, KM_USER0);
+		}
 
 		drm_gem_object_unreference(target_obj);
 	}