author	Bjorn Helgaas <bhelgaas@google.com>	2012-09-13 10:41:01 -0400
committer	Bjorn Helgaas <bhelgaas@google.com>	2012-09-13 10:41:01 -0400
commit	78890b5989d96ddce989cde929c45ceeded0fcaf (patch)
tree	4e2da81fc7c97f11aee174b1eedac110c9a68b3a /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	1959ec5f82acbdf91425b41600f119ebecb5f6a8 (diff)
parent	55d512e245bc7699a8800e23df1a24195dd08217 (diff)
Merge commit 'v3.6-rc5' into next
* commit 'v3.6-rc5': (1098 commits)
  Linux 3.6-rc5
  HID: tpkbd: work even if the new Lenovo Keyboard driver is not configured
  Remove user-triggerable BUG from mpol_to_str
  xen/pciback: Fix proper FLR steps.
  uml: fix compile error in deliver_alarm()
  dj: memory scribble in logi_dj
  Fix order of arguments to compat_put_time[spec|val]
  xen: Use correct masking in xen_swiotlb_alloc_coherent.
  xen: fix logical error in tlb flushing
  xen/p2m: Fix one-off error in checking the P2M tree directory.
  powerpc: Don't use __put_user() in patch_instruction
  powerpc: Make sure IPI handlers see data written by IPI senders
  powerpc: Restore correct DSCR in context switch
  powerpc: Fix DSCR inheritance in copy_thread()
  powerpc: Keep thread.dscr and thread.dscr_inherit in sync
  powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
  powerpc/powernv: Always go into nap mode when CPU is offline
  powerpc: Give hypervisor decrementer interrupts their own handler
  powerpc/vphn: Fix arch_update_cpu_topology() return value
  ARM: gemini: fix the gemini build
  ...

Conflicts:
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
	drivers/rapidio/devices/tsi721.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d53..e2a73b38abe9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_TLB_INVALIDATE;
-	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-	/*
-	 * Ensure that any following seqno writes only happen when the render
-	 * cache is indeed flushed (but only if the caller actually wants that).
-	 */
-	if (flush_domains)
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
 		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* lower dword */
-	intel_ring_emit(ring, 0); /* uppwer dword */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
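For readers following the logic rather than the diff markers: the hunk above replaces one unconditional set of flag assignments with two guarded groups, so a flush-only call no longer invalidates caches and an invalidate-only call no longer stalls on a flush. Below is a minimal standalone C sketch of that selection logic; gen6_flush_flags() is a hypothetical helper, and the PIPE_CONTROL_* bit values are illustrative placeholders rather than the real encodings from i915_reg.h.

#include <stdio.h>
#include <stdint.h>

/* Illustrative placeholder bits; the real values live in i915_reg.h. */
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH		(1u << 0)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH			(1u << 1)
#define PIPE_CONTROL_CS_STALL				(1u << 2)
#define PIPE_CONTROL_TLB_INVALIDATE			(1u << 3)
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1u << 4)
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1u << 5)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE		(1u << 6)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1u << 7)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1u << 8)
#define PIPE_CONTROL_QW_WRITE				(1u << 9)

static uint32_t gen6_flush_flags(uint32_t invalidate_domains,
				 uint32_t flush_domains)
{
	uint32_t flags = 0;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* CS stall so later seqno writes see the flushed caches. */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/* TLB invalidation needs a post-sync write to take effect. */
		flags |= PIPE_CONTROL_QW_WRITE;
	}
	return flags;
}

int main(void)
{
	printf("flush-only:      %#x\n", gen6_flush_flags(0, 1));
	printf("invalidate-only: %#x\n", gen6_flush_flags(1, 0));
	return 0;
}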
@@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	I915_WRITE_HEAD(ring, 0);
 	ring->write_tail(ring, 0);
 
-	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		}
 	}
 
+	/* Initialize the ring. This must happen _after_ we've cleared the ring
+	 * registers with the above sequence (the readback of the HEAD registers
+	 * also enforces ordering), otherwise the hw might lose the new ring
+	 * register values. */
+	I915_WRITE_START(ring, obj->gtt_offset);
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
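Taken together, the two init_ring_common() hunks are an ordering change: RING_START is now programmed only after HEAD/TAIL have been cleared and HEAD has been read back. A minimal userspace model of that ordering is sketched below; the register variables, the init_ring() helper, and the mask values are stand-ins for the real MMIO accessors, not the driver's actual interface.

#include <stdio.h>
#include <stdint.h>

/* Stand-in "registers"; on real hardware these are MMIO accesses via
 * I915_WRITE_HEAD/TAIL/START/CTL and I915_READ_HEAD. */
static uint32_t reg_head, reg_tail, reg_start, reg_ctl;

#define PAGE_SIZE	4096u
#define RING_NR_PAGES	0x001ff000u	/* illustrative page-count mask */
#define RING_VALID	0x00000001u

static void init_ring(uint32_t gtt_offset, uint32_t ring_size)
{
	/* Step 1: clear the stale head/tail state. */
	reg_head = 0;
	reg_tail = 0;

	/* Step 2: read head back (on real hw the readback also orders
	 * step 1 against step 3). */
	uint32_t head = reg_head;

	/* Step 3: only now program the ring base; doing it before step 1
	 * risks the hardware losing the new value, which is the bug the
	 * patch above fixes. */
	reg_start = gtt_offset;

	/* Step 4: program the size and enable the ring. */
	reg_ctl = ((ring_size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;

	printf("head=%#x start=%#x ctl=%#x\n", head, reg_start, reg_ctl);
}

int main(void)
{
	init_ring(0x00100000u, 32 * PAGE_SIZE);
	return 0;
}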