diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 02:57:56 -0400 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 03:01:08 -0400 |
commit | a22ddff8bedfe33eeb1330bbb7ef1fbe007a42c4 (patch) | |
tree | 61a2eb7fa62f5af10c2b913ca429e6b068b0eb2d /drivers/gpu/drm/i915/intel_ringbuffer.c | |
parent | 20d5a540e55a29daeef12706f9ee73baf5641c16 (diff) | |
parent | d9875690d9b89a866022ff49e3fcea892345ad92 (diff) |
Merge tag 'v3.6-rc2' into drm-intel-next
Backmerge Linux 3.6-rc2 to resolve a few funny conflicts before we put
even more madness on top:
- drivers/gpu/drm/i915/i915_irq.c: Just a spurious WARN removed in
-fixes, that has been changed in a variable-rename in -next, too.
- drivers/gpu/drm/i915/intel_ringbuffer.c: -next remove scratch_addr
(since all their users have been extracted in another function),
-fixes added another user for a hw workaround.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 44 |
1 files changed, 28 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e278675cdff9..c828169c73ae 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -214,26 +214,35 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | |||
214 | u32 invalidate_domains, u32 flush_domains) | 214 | u32 invalidate_domains, u32 flush_domains) |
215 | { | 215 | { |
216 | u32 flags = 0; | 216 | u32 flags = 0; |
217 | struct pipe_control *pc = ring->private; | ||
218 | u32 scratch_addr = pc->gtt_offset + 128; | ||
217 | int ret; | 219 | int ret; |
218 | 220 | ||
219 | /* Just flush everything. Experiments have shown that reducing the | 221 | /* Just flush everything. Experiments have shown that reducing the |
220 | * number of bits based on the write domains has little performance | 222 | * number of bits based on the write domains has little performance |
221 | * impact. | 223 | * impact. |
222 | */ | 224 | */ |
223 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | 225 | if (flush_domains) { |
224 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | 226 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
225 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | 227 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
226 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | 228 | /* |
227 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | 229 | * Ensure that any following seqno writes only happen |
228 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | 230 | * when the render cache is indeed flushed. |
229 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | 231 | */ |
230 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | ||
231 | /* | ||
232 | * Ensure that any following seqno writes only happen when the render | ||
233 | * cache is indeed flushed (but only if the caller actually wants that). | ||
234 | */ | ||
235 | if (flush_domains) | ||
236 | flags |= PIPE_CONTROL_CS_STALL; | 232 | flags |= PIPE_CONTROL_CS_STALL; |
233 | } | ||
234 | if (invalidate_domains) { | ||
235 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | ||
236 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | ||
237 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | ||
238 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | ||
239 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | ||
240 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | ||
241 | /* | ||
242 | * TLB invalidate requires a post-sync write. | ||
243 | */ | ||
244 | flags |= PIPE_CONTROL_QW_WRITE; | ||
245 | } | ||
237 | 246 | ||
238 | ret = intel_ring_begin(ring, 4); | 247 | ret = intel_ring_begin(ring, 4); |
239 | if (ret) | 248 | if (ret) |
@@ -241,7 +250,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | |||
241 | 250 | ||
242 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 251 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
243 | intel_ring_emit(ring, flags); | 252 | intel_ring_emit(ring, flags); |
244 | intel_ring_emit(ring, 0); | 253 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); |
245 | intel_ring_emit(ring, 0); | 254 | intel_ring_emit(ring, 0); |
246 | intel_ring_advance(ring); | 255 | intel_ring_advance(ring); |
247 | 256 | ||
@@ -294,8 +303,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
294 | I915_WRITE_HEAD(ring, 0); | 303 | I915_WRITE_HEAD(ring, 0); |
295 | ring->write_tail(ring, 0); | 304 | ring->write_tail(ring, 0); |
296 | 305 | ||
297 | /* Initialize the ring. */ | ||
298 | I915_WRITE_START(ring, obj->gtt_offset); | ||
299 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 306 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
300 | 307 | ||
301 | /* G45 ring initialization fails to reset head to zero */ | 308 | /* G45 ring initialization fails to reset head to zero */ |
@@ -321,6 +328,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
321 | } | 328 | } |
322 | } | 329 | } |
323 | 330 | ||
331 | /* Initialize the ring. This must happen _after_ we've cleared the ring | ||
332 | * registers with the above sequence (the readback of the HEAD registers | ||
333 | * also enforces ordering), otherwise the hw might lose the new ring | ||
334 | * register values. */ | ||
335 | I915_WRITE_START(ring, obj->gtt_offset); | ||
324 | I915_WRITE_CTL(ring, | 336 | I915_WRITE_CTL(ring, |
325 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | 337 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
326 | | RING_VALID); | 338 | | RING_VALID); |