author     Dave Airlie <airlied@redhat.com>  2013-01-17 05:34:08 -0500
committer  Dave Airlie <airlied@redhat.com>  2013-01-17 05:34:08 -0500
commit     b5cc6c0387b2f8d269c1df1e68c97c958dd22fed (patch)
tree       697f2335b3a10f55e0ea226dcd044ee4ff3f0f7f /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent     9931faca02c604c22335f5a935a501bb2ace6e20 (diff)
parent     c0c36b941b6f0be6ac74f340040cbb29d6a0b06c (diff)
Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris
Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen mem support from Chris Wilson, one of the many steps to get to
real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits & pieces
* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
drm/i915: Return the real error code from intel_set_mode()
drm/i915: Make GSM void
drm/i915: Move GSM mapping into dev_priv
drm/i915: Move even more gtt code to i915_gem_gtt
drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
drm/i915: Introduce i915_gem_set_seqno()
drm/i915: Always clear semaphore mboxes on seqno wrap
drm/i915: Initialize hardware semaphore state on ring init
drm/i915: Introduce ring set_seqno
drm/i915: Missed conversion to gtt_pte_t
drm/i915: Bug on unsupported swizzled platforms
drm/i915: BUG() if fences are used on unsupported platform
drm/i915: fixup overlay stolen memory leak
drm/i915: clean up PIPECONF bpc #defines
drm/i915: add intel_dp_set_signal_levels
drm/i915: remove leftover display.update_wm assignment
drm/i915: check for the PCH when setting pch_transcoder
drm/i915: Clear the stolen fb before enabling
drm/i915: Access to snooped system memory through the GTT is incoherent
drm/i915: Remove stale comment about intel_dp_detect()
...
Conflicts:
drivers/gpu/drm/i915/intel_display.c
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  53
1 file changed, 0 insertions, 53 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a07393..163bb52bd3b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -150,17 +150,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
-		     reloc->write_domain != target_obj->pending_write_domain)) {
-		DRM_DEBUG("Write domain conflict: "
-			  "obj %p target %d offset %d "
-			  "new %08x old %08x\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->write_domain,
-			  target_obj->pending_write_domain);
-		return ret;
-	}
 
 	target_obj->pending_read_domains |= reloc->read_domains;
 	target_obj->pending_write_domain |= reloc->write_domain;
@@ -602,44 +591,11 @@ err:
 }
 
 static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
-static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -650,18 +606,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		i915_gem_chipset_flush(ring->dev);
 
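
For context, the deleted i915_gem_execbuffer_wait_for_flips() walked a bitmask with one bit per display plane that still had a page flip outstanding and, for every set bit, emitted an MI_WAIT_FOR_EVENT (followed by an MI_NOOP) into the ring so the batch would not execute until that flip completed. The stand-alone C sketch below reproduces only that bit-walk and mask selection so the control flow is easy to follow outside the kernel; the FAKE_MI_* constants and the emit() stub are placeholders invented for illustration, not the driver's real MI_* definitions or ring-emit API.

/*
 * Minimal sketch of the bit-walk in the removed helper.  The FAKE_MI_*
 * values and emit() are illustrative stand-ins; only the control flow
 * mirrors the deleted kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_WAIT_FOR_EVENT        (1u << 23)  /* placeholder, not the real opcode */
#define FAKE_MI_WAIT_FOR_PLANE_A_FLIP (1u << 2)   /* placeholder bit */
#define FAKE_MI_WAIT_FOR_PLANE_B_FLIP (1u << 6)   /* placeholder bit */
#define FAKE_MI_NOOP                  0u

/* The real code wrote each dword into the ring buffer; here we just print it. */
static void emit(uint32_t dword)
{
	printf("emit 0x%08x\n", (unsigned int)dword);
}

/* Mirrors the removed i915_gem_execbuffer_wait_for_flips(): 'flips' carries
 * one bit per display plane with an outstanding page flip. */
static void wait_for_flips(uint32_t flips)
{
	uint32_t plane, flip_mask;

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		/* Plane 0 is A, any other plane is treated as B, exactly as
		 * in the removed function. */
		flip_mask = plane ? FAKE_MI_WAIT_FOR_PLANE_B_FLIP
				  : FAKE_MI_WAIT_FOR_PLANE_A_FLIP;

		emit(FAKE_MI_WAIT_FOR_EVENT | flip_mask);
		emit(FAKE_MI_NOOP);	/* companion NOOP dword, as in the original */
	}
}

int main(void)
{
	wait_for_flips(0x3);	/* pretend planes A and B both have flips pending */
	return 0;
}

Running the sketch prints one WAIT/NOOP pair per set bit, which is the pattern the execbuffer path used to insert ahead of the batch before this removal.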