author    Paul Mundt <lethal@linux-sh.org>    2011-01-12 00:37:42 -0500
committer Paul Mundt <lethal@linux-sh.org>    2011-01-12 00:37:42 -0500
commit    83eb95b852902f952ba594447a796ad8146b9462 (patch)
tree      33c199aeeae58b69ad8d6d2a33c2d96ba2b98ddf /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    efb3e34b6176d30c4fe8635fa8e1beb6280cc2cd (diff)
parent    9bbe7b984096ac45586da2adf26c14069ecb79b2 (diff)
Merge branch 'sh/sdio' into sh-latest
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  1007
1 file changed, 591 insertions, 416 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e820..56bc95c056dd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
49} 49}
50 50
51static void 51static void
52render_ring_flush(struct drm_device *dev, 52render_ring_flush(struct intel_ring_buffer *ring,
53 struct intel_ring_buffer *ring,
54 u32 invalidate_domains, 53 u32 invalidate_domains,
55 u32 flush_domains) 54 u32 flush_domains)
56{ 55{
56 struct drm_device *dev = ring->dev;
57 drm_i915_private_t *dev_priv = dev->dev_private; 57 drm_i915_private_t *dev_priv = dev->dev_private;
58 u32 cmd; 58 u32 cmd;
59 59
@@ -109,49 +109,50 @@ render_ring_flush(struct drm_device *dev,
109 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 109 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
110 cmd |= MI_EXE_FLUSH; 110 cmd |= MI_EXE_FLUSH;
111 111
112 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
113 (IS_G4X(dev) || IS_GEN5(dev)))
114 cmd |= MI_INVALIDATE_ISP;
115
112#if WATCH_EXEC 116#if WATCH_EXEC
113 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 117 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
114#endif 118#endif
115 intel_ring_begin(dev, ring, 2); 119 if (intel_ring_begin(ring, 2) == 0) {
116 intel_ring_emit(dev, ring, cmd); 120 intel_ring_emit(ring, cmd);
117 intel_ring_emit(dev, ring, MI_NOOP); 121 intel_ring_emit(ring, MI_NOOP);
118 intel_ring_advance(dev, ring); 122 intel_ring_advance(ring);
123 }
119 } 124 }
120} 125}
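
The signature change above reflects a convention shift that runs through this patch: intel_ring_begin() now returns an error code instead of void, so every emit sequence either checks it inline (as render_ring_flush() now does) or propagates it. A minimal sketch of the propagating form, using only helpers that appear in this diff; the function name is hypothetical:

static int example_emit_two_dwords(struct intel_ring_buffer *ring, u32 cmd)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords; may fail */
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);		/* write the command dword */
	intel_ring_emit(ring, MI_NOOP);		/* pad to an even dword count */
	intel_ring_advance(ring);		/* publish the new tail to the hw */
	return 0;
}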
121 126
122static void ring_write_tail(struct drm_device *dev, 127static void ring_write_tail(struct intel_ring_buffer *ring,
123 struct intel_ring_buffer *ring,
124 u32 value) 128 u32 value)
125{ 129{
126 drm_i915_private_t *dev_priv = dev->dev_private; 130 drm_i915_private_t *dev_priv = ring->dev->dev_private;
127 I915_WRITE_TAIL(ring, value); 131 I915_WRITE_TAIL(ring, value);
128} 132}
129 133
130u32 intel_ring_get_active_head(struct drm_device *dev, 134u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
131 struct intel_ring_buffer *ring)
132{ 135{
133 drm_i915_private_t *dev_priv = dev->dev_private; 136 drm_i915_private_t *dev_priv = ring->dev->dev_private;
134 u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? 137 u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
135 RING_ACTHD(ring->mmio_base) : ACTHD; 138 RING_ACTHD(ring->mmio_base) : ACTHD;
136 139
137 return I915_READ(acthd_reg); 140 return I915_READ(acthd_reg);
138} 141}
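
ring_write_tail() and intel_ring_get_active_head() show the other half of the convention: per-ring functions no longer take a struct drm_device argument and instead recover it through the ring->dev back-pointer. A minimal illustrative sketch (the helper name is hypothetical):

static u32 example_read_ring_head(struct intel_ring_buffer *ring)
{
	/* dev_priv is needed by the I915_READ_HEAD() register macro */
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	return I915_READ_HEAD(ring) & HEAD_ADDR;
}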
139 142
140static int init_ring_common(struct drm_device *dev, 143static int init_ring_common(struct intel_ring_buffer *ring)
141 struct intel_ring_buffer *ring)
142{ 144{
145 drm_i915_private_t *dev_priv = ring->dev->dev_private;
146 struct drm_i915_gem_object *obj = ring->obj;
143 u32 head; 147 u32 head;
144 drm_i915_private_t *dev_priv = dev->dev_private;
145 struct drm_i915_gem_object *obj_priv;
146 obj_priv = to_intel_bo(ring->gem_object);
147 148
148 /* Stop the ring if it's running. */ 149 /* Stop the ring if it's running. */
149 I915_WRITE_CTL(ring, 0); 150 I915_WRITE_CTL(ring, 0);
150 I915_WRITE_HEAD(ring, 0); 151 I915_WRITE_HEAD(ring, 0);
151 ring->write_tail(dev, ring, 0); 152 ring->write_tail(ring, 0);
152 153
153 /* Initialize the ring. */ 154 /* Initialize the ring. */
154 I915_WRITE_START(ring, obj_priv->gtt_offset); 155 I915_WRITE_START(ring, obj->gtt_offset);
155 head = I915_READ_HEAD(ring) & HEAD_ADDR; 156 head = I915_READ_HEAD(ring) & HEAD_ADDR;
156 157
157 /* G45 ring initialization fails to reset head to zero */ 158 /* G45 ring initialization fails to reset head to zero */
@@ -178,12 +179,13 @@ static int init_ring_common(struct drm_device *dev,
178 } 179 }
179 180
180 I915_WRITE_CTL(ring, 181 I915_WRITE_CTL(ring,
181 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 182 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
182 | RING_REPORT_64K | RING_VALID); 183 | RING_REPORT_64K | RING_VALID);
183 184
184 head = I915_READ_HEAD(ring) & HEAD_ADDR;
185 /* If the head is still not zero, the ring is dead */ 185 /* If the head is still not zero, the ring is dead */
186 if (head != 0) { 186 if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
187 I915_READ_START(ring) != obj->gtt_offset ||
188 (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
187 DRM_ERROR("%s initialization failed " 189 DRM_ERROR("%s initialization failed "
188 "ctl %08x head %08x tail %08x start %08x\n", 190 "ctl %08x head %08x tail %08x start %08x\n",
189 ring->name, 191 ring->name,
@@ -194,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
194 return -EIO; 196 return -EIO;
195 } 197 }
196 198
197 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 199 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
198 i915_kernel_lost_context(dev); 200 i915_kernel_lost_context(ring->dev);
199 else { 201 else {
200 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 202 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
201 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 203 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
203 if (ring->space < 0) 205 if (ring->space < 0)
204 ring->space += ring->size; 206 ring->space += ring->size;
205 } 207 }
208
206 return 0; 209 return 0;
207} 210}
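
The head/tail/space bookkeeping restored at the end of init_ring_common() treats the ring as a circular buffer with 8 bytes of slack so the tail never catches the head exactly. A worked example of the arithmetic (numbers are illustrative, not from the patch):

static int example_ring_space(struct intel_ring_buffer *ring)
{
	/* e.g. size = 32 * PAGE_SIZE = 0x20000, head = 0x100, tail = 0x1f000:
	 * 0x100 - (0x1f000 + 8) is negative, so the ring size is added and
	 * 0x10f8 bytes of usable space remain. */
	int space = ring->head - (ring->tail + 8);

	if (space < 0)
		space += ring->size;
	return space;
}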
208 211
209static int init_render_ring(struct drm_device *dev, 212/*
210 struct intel_ring_buffer *ring) 213 * 965+ support PIPE_CONTROL commands, which provide finer grained control
214 * over cache flushing.
215 */
216struct pipe_control {
217 struct drm_i915_gem_object *obj;
218 volatile u32 *cpu_page;
219 u32 gtt_offset;
220};
221
222static int
223init_pipe_control(struct intel_ring_buffer *ring)
211{ 224{
212 drm_i915_private_t *dev_priv = dev->dev_private; 225 struct pipe_control *pc;
213 int ret = init_ring_common(dev, ring); 226 struct drm_i915_gem_object *obj;
214 int mode; 227 int ret;
228
229 if (ring->private)
230 return 0;
231
232 pc = kmalloc(sizeof(*pc), GFP_KERNEL);
233 if (!pc)
234 return -ENOMEM;
235
236 obj = i915_gem_alloc_object(ring->dev, 4096);
237 if (obj == NULL) {
238 DRM_ERROR("Failed to allocate seqno page\n");
239 ret = -ENOMEM;
240 goto err;
241 }
242 obj->agp_type = AGP_USER_CACHED_MEMORY;
243
244 ret = i915_gem_object_pin(obj, 4096, true);
245 if (ret)
246 goto err_unref;
247
248 pc->gtt_offset = obj->gtt_offset;
249 pc->cpu_page = kmap(obj->pages[0]);
250 if (pc->cpu_page == NULL)
251 goto err_unpin;
252
253 pc->obj = obj;
254 ring->private = pc;
255 return 0;
256
257err_unpin:
258 i915_gem_object_unpin(obj);
259err_unref:
260 drm_gem_object_unreference(&obj->base);
261err:
262 kfree(pc);
263 return ret;
264}
265
266static void
267cleanup_pipe_control(struct intel_ring_buffer *ring)
268{
269 struct pipe_control *pc = ring->private;
270 struct drm_i915_gem_object *obj;
271
272 if (!ring->private)
273 return;
274
275 obj = pc->obj;
276 kunmap(obj->pages[0]);
277 i915_gem_object_unpin(obj);
278 drm_gem_object_unreference(&obj->base);
279
280 kfree(pc);
281 ring->private = NULL;
282}
283
284static int init_render_ring(struct intel_ring_buffer *ring)
285{
286 struct drm_device *dev = ring->dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 int ret = init_ring_common(ring);
215 289
216 if (INTEL_INFO(dev)->gen > 3) { 290 if (INTEL_INFO(dev)->gen > 3) {
217 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 291 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
218 if (IS_GEN6(dev)) 292 if (IS_GEN6(dev))
219 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 293 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
220 I915_WRITE(MI_MODE, mode); 294 I915_WRITE(MI_MODE, mode);
221 } 295 }
296
297 if (INTEL_INFO(dev)->gen >= 6) {
298 } else if (IS_GEN5(dev)) {
299 ret = init_pipe_control(ring);
300 if (ret)
301 return ret;
302 }
303
222 return ret; 304 return ret;
223} 305}
224 306
225#define PIPE_CONTROL_FLUSH(addr) \ 307static void render_ring_cleanup(struct intel_ring_buffer *ring)
308{
309 if (!ring->private)
310 return;
311
312 cleanup_pipe_control(ring);
313}
314
315static void
316update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
317{
318 struct drm_device *dev = ring->dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 int id;
321
322 /*
323 * cs -> 1 = vcs, 0 = bcs
324 * vcs -> 1 = bcs, 0 = cs,
325 * bcs -> 1 = cs, 0 = vcs.
326 */
327 id = ring - dev_priv->ring;
328 id += 2 - i;
329 id %= 3;
330
331 intel_ring_emit(ring,
332 MI_SEMAPHORE_MBOX |
333 MI_SEMAPHORE_REGISTER |
334 MI_SEMAPHORE_UPDATE);
335 intel_ring_emit(ring, seqno);
336 intel_ring_emit(ring,
337 RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
338}
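
The index juggling in update_semaphore() selects the two other rings' sync mailboxes. Assuming the rings sit in dev_priv->ring[] in the order render (cs), bsd (vcs), blitter (bcs), matching the RCS/VCS/BCS initialisers at the end of this patch, the arithmetic works out as in this worked example:

/*
 * id = (ring_index + 2 - i) % 3
 *
 *   render (0), i = 1:  (0 + 2 - 1) % 3 = 1  -> writes vcs's mailbox
 *   render (0), i = 0:  (0 + 2 - 0) % 3 = 2  -> writes bcs's mailbox
 *   bsd    (1), i = 1:  (1 + 2 - 1) % 3 = 2  -> writes bcs's mailbox
 *   bsd    (1), i = 0:  (1 + 2 - 0) % 3 = 0  -> writes cs's mailbox
 *   blt    (2), i = 1:  (2 + 2 - 1) % 3 = 0  -> writes cs's mailbox
 *   blt    (2), i = 0:  (2 + 2 - 0) % 3 = 1  -> writes vcs's mailbox
 *
 * i.e. each ring posts its new seqno into both other rings' semaphore
 * registers, exactly as the comment in update_semaphore() describes.
 */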
339
340static int
341gen6_add_request(struct intel_ring_buffer *ring,
342 u32 *result)
343{
344 u32 seqno;
345 int ret;
346
347 ret = intel_ring_begin(ring, 10);
348 if (ret)
349 return ret;
350
351 seqno = i915_gem_get_seqno(ring->dev);
352 update_semaphore(ring, 0, seqno);
353 update_semaphore(ring, 1, seqno);
354
355 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
356 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
357 intel_ring_emit(ring, seqno);
358 intel_ring_emit(ring, MI_USER_INTERRUPT);
359 intel_ring_advance(ring);
360
361 *result = seqno;
362 return 0;
363}
364
365int
366intel_ring_sync(struct intel_ring_buffer *ring,
367 struct intel_ring_buffer *to,
368 u32 seqno)
369{
370 int ret;
371
372 ret = intel_ring_begin(ring, 4);
373 if (ret)
374 return ret;
375
376 intel_ring_emit(ring,
377 MI_SEMAPHORE_MBOX |
378 MI_SEMAPHORE_REGISTER |
379 intel_ring_sync_index(ring, to) << 17 |
380 MI_SEMAPHORE_COMPARE);
381 intel_ring_emit(ring, seqno);
382 intel_ring_emit(ring, 0);
383 intel_ring_emit(ring, MI_NOOP);
384 intel_ring_advance(ring);
385
386 return 0;
387}
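
A hypothetical caller of intel_ring_sync(), assuming (as the MI_SEMAPHORE_COMPARE / MI_SEMAPHORE_UPDATE pairing suggests) that the first argument is the ring that must wait and the second is the ring that will eventually signal seqno via gen6_add_request():

static int example_wait_for_other_ring(struct intel_ring_buffer *waiter,
				       struct intel_ring_buffer *signaller,
				       u32 seqno)
{
	if (waiter == signaller)
		return 0;	/* nothing to wait for on the same ring */

	/* stalls the waiter until the signaller's mailbox reaches seqno */
	return intel_ring_sync(waiter, signaller, seqno);
}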
388
389#define PIPE_CONTROL_FLUSH(ring__, addr__) \
226do { \ 390do { \
227 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ 391 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
228 PIPE_CONTROL_DEPTH_STALL | 2); \ 392 PIPE_CONTROL_DEPTH_STALL | 2); \
229 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ 393 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
230 OUT_RING(0); \ 394 intel_ring_emit(ring__, 0); \
231 OUT_RING(0); \ 395 intel_ring_emit(ring__, 0); \
232} while (0) 396} while (0)
233 397
234/** 398static int
235 * Creates a new sequence number, emitting a write of it to the status page 399pc_render_add_request(struct intel_ring_buffer *ring,
236 * plus an interrupt, which will trigger i915_user_interrupt_handler. 400 u32 *result)
237 *
238 * Must be called with struct_lock held.
239 *
240 * Returned sequence numbers are nonzero on success.
241 */
242static u32
243render_ring_add_request(struct drm_device *dev,
244 struct intel_ring_buffer *ring,
245 u32 flush_domains)
246{ 401{
247 drm_i915_private_t *dev_priv = dev->dev_private; 402 struct drm_device *dev = ring->dev;
248 u32 seqno; 403 u32 seqno = i915_gem_get_seqno(dev);
404 struct pipe_control *pc = ring->private;
405 u32 scratch_addr = pc->gtt_offset + 128;
406 int ret;
249 407
250 seqno = i915_gem_get_seqno(dev); 408 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
251 409 * incoherent with writes to memory, i.e. completely fubar,
252 if (IS_GEN6(dev)) { 410 * so we need to use PIPE_NOTIFY instead.
253 BEGIN_LP_RING(6); 411 *
254 OUT_RING(GFX_OP_PIPE_CONTROL | 3); 412 * However, we also need to workaround the qword write
255 OUT_RING(PIPE_CONTROL_QW_WRITE | 413 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
256 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | 414 * memory before requesting an interrupt.
257 PIPE_CONTROL_NOTIFY); 415 */
258 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 416 ret = intel_ring_begin(ring, 32);
259 OUT_RING(seqno); 417 if (ret)
260 OUT_RING(0); 418 return ret;
261 OUT_RING(0); 419
262 ADVANCE_LP_RING(); 420 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
263 } else if (HAS_PIPE_CONTROL(dev)) { 421 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
264 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; 422 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
423 intel_ring_emit(ring, seqno);
424 intel_ring_emit(ring, 0);
425 PIPE_CONTROL_FLUSH(ring, scratch_addr);
426 scratch_addr += 128; /* write to separate cachelines */
427 PIPE_CONTROL_FLUSH(ring, scratch_addr);
428 scratch_addr += 128;
429 PIPE_CONTROL_FLUSH(ring, scratch_addr);
430 scratch_addr += 128;
431 PIPE_CONTROL_FLUSH(ring, scratch_addr);
432 scratch_addr += 128;
433 PIPE_CONTROL_FLUSH(ring, scratch_addr);
434 scratch_addr += 128;
435 PIPE_CONTROL_FLUSH(ring, scratch_addr);
436 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
437 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
438 PIPE_CONTROL_NOTIFY);
439 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
440 intel_ring_emit(ring, seqno);
441 intel_ring_emit(ring, 0);
442 intel_ring_advance(ring);
443
444 *result = seqno;
445 return 0;
446}
265 447
266 /* 448static int
267 * Workaround qword write incoherence by flushing the 449render_ring_add_request(struct intel_ring_buffer *ring,
268 * PIPE_NOTIFY buffers out to memory before requesting 450 u32 *result)
269 * an interrupt. 451{
270 */ 452 struct drm_device *dev = ring->dev;
271 BEGIN_LP_RING(32); 453 u32 seqno = i915_gem_get_seqno(dev);
272 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 454 int ret;
273 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
274 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
275 OUT_RING(seqno);
276 OUT_RING(0);
277 PIPE_CONTROL_FLUSH(scratch_addr);
278 scratch_addr += 128; /* write to separate cachelines */
279 PIPE_CONTROL_FLUSH(scratch_addr);
280 scratch_addr += 128;
281 PIPE_CONTROL_FLUSH(scratch_addr);
282 scratch_addr += 128;
283 PIPE_CONTROL_FLUSH(scratch_addr);
284 scratch_addr += 128;
285 PIPE_CONTROL_FLUSH(scratch_addr);
286 scratch_addr += 128;
287 PIPE_CONTROL_FLUSH(scratch_addr);
288 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
289 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
290 PIPE_CONTROL_NOTIFY);
291 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
292 OUT_RING(seqno);
293 OUT_RING(0);
294 ADVANCE_LP_RING();
295 } else {
296 BEGIN_LP_RING(4);
297 OUT_RING(MI_STORE_DWORD_INDEX);
298 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
299 OUT_RING(seqno);
300 455
301 OUT_RING(MI_USER_INTERRUPT); 456 ret = intel_ring_begin(ring, 4);
302 ADVANCE_LP_RING(); 457 if (ret)
303 } 458 return ret;
304 return seqno; 459
460 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
461 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
462 intel_ring_emit(ring, seqno);
463 intel_ring_emit(ring, MI_USER_INTERRUPT);
464 intel_ring_advance(ring);
465
466 *result = seqno;
467 return 0;
305} 468}
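
pc_render_add_request() replaces the old HAS_PIPE_CONTROL branch of render_ring_add_request(). The scratch writes between the two qword writes are the workaround described in its comment: they land in distinct cachelines of the 4096-byte page set up by init_pipe_control(). An illustrative layout of that page:

/*
 * pipe_control page (4096 bytes at pc->gtt_offset):
 *
 *   offset    0: qword seqno write, read back by pc_render_get_seqno()
 *   offset  128: 1st PIPE_CONTROL_FLUSH scratch write
 *   offset  256: 2nd      "        (scratch_addr += 128 each time)
 *   offset  384: 3rd
 *   offset  512: 4th
 *   offset  640: 5th
 *   offset  768: 6th
 *
 * so the six flushes touch six separate cachelines before the final
 * PIPE_CONTROL_NOTIFY raises the interrupt.
 */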
306 469
307static u32 470static u32
308render_ring_get_seqno(struct drm_device *dev, 471ring_get_seqno(struct intel_ring_buffer *ring)
309 struct intel_ring_buffer *ring)
310{ 472{
311 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 473 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
312 if (HAS_PIPE_CONTROL(dev))
313 return ((volatile u32 *)(dev_priv->seqno_page))[0];
314 else
315 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
316} 474}
317 475
318static void 476static u32
319render_ring_get_user_irq(struct drm_device *dev, 477pc_render_get_seqno(struct intel_ring_buffer *ring)
320 struct intel_ring_buffer *ring)
321{ 478{
322 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 479 struct pipe_control *pc = ring->private;
323 unsigned long irqflags; 480 return pc->cpu_page[0];
481}
482
483static bool
484render_ring_get_irq(struct intel_ring_buffer *ring)
485{
486 struct drm_device *dev = ring->dev;
487
488 if (!dev->irq_enabled)
489 return false;
490
491 if (atomic_inc_return(&ring->irq_refcount) == 1) {
492 drm_i915_private_t *dev_priv = dev->dev_private;
493 unsigned long irqflags;
324 494
325 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
326 if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
327 if (HAS_PCH_SPLIT(dev)) 496 if (HAS_PCH_SPLIT(dev))
328 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 497 ironlake_enable_graphics_irq(dev_priv,
498 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
329 else 499 else
330 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 500 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
331 } 502 }
332 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 503
504 return true;
333} 505}
334 506
335static void 507static void
336render_ring_put_user_irq(struct drm_device *dev, 508render_ring_put_irq(struct intel_ring_buffer *ring)
337 struct intel_ring_buffer *ring)
338{ 509{
339 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 510 struct drm_device *dev = ring->dev;
340 unsigned long irqflags; 511
512 if (atomic_dec_and_test(&ring->irq_refcount)) {
513 drm_i915_private_t *dev_priv = dev->dev_private;
514 unsigned long irqflags;
341 515
342 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 516 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
343 BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
344 if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
345 if (HAS_PCH_SPLIT(dev)) 517 if (HAS_PCH_SPLIT(dev))
346 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 518 ironlake_disable_graphics_irq(dev_priv,
519 GT_USER_INTERRUPT |
520 GT_PIPE_NOTIFY);
347 else 521 else
348 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 522 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
523 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
349 } 524 }
350 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
351} 525}
352 526
353void intel_ring_setup_status_page(struct drm_device *dev, 527void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
354 struct intel_ring_buffer *ring)
355{ 528{
356 drm_i915_private_t *dev_priv = dev->dev_private; 529 drm_i915_private_t *dev_priv = ring->dev->dev_private;
357 if (IS_GEN6(dev)) { 530 u32 mmio = IS_GEN6(ring->dev) ?
358 I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), 531 RING_HWS_PGA_GEN6(ring->mmio_base) :
359 ring->status_page.gfx_addr); 532 RING_HWS_PGA(ring->mmio_base);
360 I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ 533 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
361 } else { 534 POSTING_READ(mmio);
362 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
363 ring->status_page.gfx_addr);
364 I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
365 }
366
367} 535}
368 536
369static void 537static void
370bsd_ring_flush(struct drm_device *dev, 538bsd_ring_flush(struct intel_ring_buffer *ring,
371 struct intel_ring_buffer *ring, 539 u32 invalidate_domains,
372 u32 invalidate_domains, 540 u32 flush_domains)
373 u32 flush_domains)
374{ 541{
375 intel_ring_begin(dev, ring, 2); 542 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
376 intel_ring_emit(dev, ring, MI_FLUSH); 543 return;
377 intel_ring_emit(dev, ring, MI_NOOP);
378 intel_ring_advance(dev, ring);
379}
380 544
381static int init_bsd_ring(struct drm_device *dev, 545 if (intel_ring_begin(ring, 2) == 0) {
382 struct intel_ring_buffer *ring) 546 intel_ring_emit(ring, MI_FLUSH);
383{ 547 intel_ring_emit(ring, MI_NOOP);
384 return init_ring_common(dev, ring); 548 intel_ring_advance(ring);
549 }
385} 550}
386 551
387static u32 552static int
388ring_add_request(struct drm_device *dev, 553ring_add_request(struct intel_ring_buffer *ring,
389 struct intel_ring_buffer *ring, 554 u32 *result)
390 u32 flush_domains)
391{ 555{
392 u32 seqno; 556 u32 seqno;
557 int ret;
558
559 ret = intel_ring_begin(ring, 4);
560 if (ret)
561 return ret;
393 562
394 seqno = i915_gem_get_seqno(dev); 563 seqno = i915_gem_get_seqno(ring->dev);
395 564
396 intel_ring_begin(dev, ring, 4); 565 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
397 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); 566 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
398 intel_ring_emit(dev, ring, 567 intel_ring_emit(ring, seqno);
399 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 568 intel_ring_emit(ring, MI_USER_INTERRUPT);
400 intel_ring_emit(dev, ring, seqno); 569 intel_ring_advance(ring);
401 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
402 intel_ring_advance(dev, ring);
403 570
404 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 571 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
405 572 *result = seqno;
406 return seqno; 573 return 0;
407} 574}
408 575
409static void 576static bool
410bsd_ring_get_user_irq(struct drm_device *dev, 577ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
411 struct intel_ring_buffer *ring)
412{ 578{
413 /* do nothing */ 579 struct drm_device *dev = ring->dev;
580
581 if (!dev->irq_enabled)
582 return false;
583
584 if (atomic_inc_return(&ring->irq_refcount) == 1) {
585 drm_i915_private_t *dev_priv = dev->dev_private;
586 unsigned long irqflags;
587
588 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
589 ironlake_enable_graphics_irq(dev_priv, flag);
590 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
591 }
592
593 return true;
414} 594}
595
415static void 596static void
416bsd_ring_put_user_irq(struct drm_device *dev, 597ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
417 struct intel_ring_buffer *ring)
418{ 598{
419 /* do nothing */ 599 struct drm_device *dev = ring->dev;
600
601 if (atomic_dec_and_test(&ring->irq_refcount)) {
602 drm_i915_private_t *dev_priv = dev->dev_private;
603 unsigned long irqflags;
604
605 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
606 ironlake_disable_graphics_irq(dev_priv, flag);
607 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
608 }
420} 609}
421 610
422static u32 611static bool
423ring_status_page_get_seqno(struct drm_device *dev, 612bsd_ring_get_irq(struct intel_ring_buffer *ring)
424 struct intel_ring_buffer *ring)
425{ 613{
426 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 614 return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
615}
616static void
617bsd_ring_put_irq(struct intel_ring_buffer *ring)
618{
619 ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
427} 620}
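
The irq_get/irq_put hooks replace user_irq_get/user_irq_put and are refcounted with atomic_inc_return()/atomic_dec_and_test(), so only the first waiter unmasks the interrupt source and only the last one masks it again. A hypothetical caller sketch (real waiters sleep on a waitqueue and handle seqno wraparound; this only shows the bracketing):

static void example_busy_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	if (!ring->irq_get(ring))
		return;		/* irqs unavailable, caller must poll instead */

	while (ring->get_seqno(ring) < seqno)
		cpu_relax();	/* placeholder for the real sleeping wait */

	ring->irq_put(ring);	/* last waiter re-masks the interrupt */
}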
428 621
429static int 622static int
430ring_dispatch_gem_execbuffer(struct drm_device *dev, 623ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
431 struct intel_ring_buffer *ring,
432 struct drm_i915_gem_execbuffer2 *exec,
433 struct drm_clip_rect *cliprects,
434 uint64_t exec_offset)
435{ 624{
436 uint32_t exec_start; 625 int ret;
437 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 626
438 intel_ring_begin(dev, ring, 2); 627 ret = intel_ring_begin(ring, 2);
439 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | 628 if (ret)
440 (2 << 6) | MI_BATCH_NON_SECURE_I965); 629 return ret;
441 intel_ring_emit(dev, ring, exec_start); 630
442 intel_ring_advance(dev, ring); 631 intel_ring_emit(ring,
632 MI_BATCH_BUFFER_START | (2 << 6) |
633 MI_BATCH_NON_SECURE_I965);
634 intel_ring_emit(ring, offset);
635 intel_ring_advance(ring);
636
443 return 0; 637 return 0;
444} 638}
445 639
446static int 640static int
447render_ring_dispatch_gem_execbuffer(struct drm_device *dev, 641render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
448 struct intel_ring_buffer *ring, 642 u32 offset, u32 len)
449 struct drm_i915_gem_execbuffer2 *exec,
450 struct drm_clip_rect *cliprects,
451 uint64_t exec_offset)
452{ 643{
644 struct drm_device *dev = ring->dev;
453 drm_i915_private_t *dev_priv = dev->dev_private; 645 drm_i915_private_t *dev_priv = dev->dev_private;
454 int nbox = exec->num_cliprects; 646 int ret;
455 int i = 0, count;
456 uint32_t exec_start, exec_len;
457 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
458 exec_len = (uint32_t) exec->batch_len;
459 647
460 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); 648 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
461 649
462 count = nbox ? nbox : 1; 650 if (IS_I830(dev) || IS_845G(dev)) {
651 ret = intel_ring_begin(ring, 4);
652 if (ret)
653 return ret;
463 654
464 for (i = 0; i < count; i++) { 655 intel_ring_emit(ring, MI_BATCH_BUFFER);
465 if (i < nbox) { 656 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
466 int ret = i915_emit_box(dev, cliprects, i, 657 intel_ring_emit(ring, offset + len - 8);
467 exec->DR1, exec->DR4); 658 intel_ring_emit(ring, 0);
468 if (ret) 659 } else {
469 return ret; 660 ret = intel_ring_begin(ring, 2);
470 } 661 if (ret)
662 return ret;
471 663
472 if (IS_I830(dev) || IS_845G(dev)) { 664 if (INTEL_INFO(dev)->gen >= 4) {
473 intel_ring_begin(dev, ring, 4); 665 intel_ring_emit(ring,
474 intel_ring_emit(dev, ring, MI_BATCH_BUFFER); 666 MI_BATCH_BUFFER_START | (2 << 6) |
475 intel_ring_emit(dev, ring, 667 MI_BATCH_NON_SECURE_I965);
476 exec_start | MI_BATCH_NON_SECURE); 668 intel_ring_emit(ring, offset);
477 intel_ring_emit(dev, ring, exec_start + exec_len - 4);
478 intel_ring_emit(dev, ring, 0);
479 } else { 669 } else {
480 intel_ring_begin(dev, ring, 2); 670 intel_ring_emit(ring,
481 if (INTEL_INFO(dev)->gen >= 4) { 671 MI_BATCH_BUFFER_START | (2 << 6));
482 intel_ring_emit(dev, ring, 672 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
483 MI_BATCH_BUFFER_START | (2 << 6)
484 | MI_BATCH_NON_SECURE_I965);
485 intel_ring_emit(dev, ring, exec_start);
486 } else {
487 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
488 | (2 << 6));
489 intel_ring_emit(dev, ring, exec_start |
490 MI_BATCH_NON_SECURE);
491 }
492 } 673 }
493 intel_ring_advance(dev, ring);
494 } 674 }
495 675 intel_ring_advance(ring);
496 if (IS_G4X(dev) || IS_GEN5(dev)) {
497 intel_ring_begin(dev, ring, 2);
498 intel_ring_emit(dev, ring, MI_FLUSH |
499 MI_NO_WRITE_FLUSH |
500 MI_INVALIDATE_ISP );
501 intel_ring_emit(dev, ring, MI_NOOP);
502 intel_ring_advance(dev, ring);
503 }
504 /* XXX breadcrumb */
505 676
506 return 0; 677 return 0;
507} 678}
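
render_ring_dispatch_execbuffer() now receives a plain GTT offset and length instead of the execbuffer2 struct; the cliprect loop no longer lives on this path, and the G4X/Ironlake MI_INVALIDATE_ISP flush has been folded into render_ring_flush() (see the first hunk). The batch-start forms it emits, summarised from the code above:

/*
 *   i830 / 845G:  MI_BATCH_BUFFER, offset | MI_BATCH_NON_SECURE,
 *                 offset + len - 8, 0         (explicit start/end pair)
 *   gen4 and up:  MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965,
 *                 offset
 *   older gens:   MI_BATCH_BUFFER_START | (2 << 6),
 *                 offset | MI_BATCH_NON_SECURE
 */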
508 679
509static void cleanup_status_page(struct drm_device *dev, 680static void cleanup_status_page(struct intel_ring_buffer *ring)
510 struct intel_ring_buffer *ring)
511{ 681{
512 drm_i915_private_t *dev_priv = dev->dev_private; 682 drm_i915_private_t *dev_priv = ring->dev->dev_private;
513 struct drm_gem_object *obj; 683 struct drm_i915_gem_object *obj;
514 struct drm_i915_gem_object *obj_priv;
515 684
516 obj = ring->status_page.obj; 685 obj = ring->status_page.obj;
517 if (obj == NULL) 686 if (obj == NULL)
518 return; 687 return;
519 obj_priv = to_intel_bo(obj);
520 688
521 kunmap(obj_priv->pages[0]); 689 kunmap(obj->pages[0]);
522 i915_gem_object_unpin(obj); 690 i915_gem_object_unpin(obj);
523 drm_gem_object_unreference(obj); 691 drm_gem_object_unreference(&obj->base);
524 ring->status_page.obj = NULL; 692 ring->status_page.obj = NULL;
525 693
526 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 694 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
527} 695}
528 696
529static int init_status_page(struct drm_device *dev, 697static int init_status_page(struct intel_ring_buffer *ring)
530 struct intel_ring_buffer *ring)
531{ 698{
699 struct drm_device *dev = ring->dev;
532 drm_i915_private_t *dev_priv = dev->dev_private; 700 drm_i915_private_t *dev_priv = dev->dev_private;
533 struct drm_gem_object *obj; 701 struct drm_i915_gem_object *obj;
534 struct drm_i915_gem_object *obj_priv;
535 int ret; 702 int ret;
536 703
537 obj = i915_gem_alloc_object(dev, 4096); 704 obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
540 ret = -ENOMEM; 707 ret = -ENOMEM;
541 goto err; 708 goto err;
542 } 709 }
543 obj_priv = to_intel_bo(obj); 710 obj->agp_type = AGP_USER_CACHED_MEMORY;
544 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
545 711
546 ret = i915_gem_object_pin(obj, 4096); 712 ret = i915_gem_object_pin(obj, 4096, true);
547 if (ret != 0) { 713 if (ret != 0) {
548 goto err_unref; 714 goto err_unref;
549 } 715 }
550 716
551 ring->status_page.gfx_addr = obj_priv->gtt_offset; 717 ring->status_page.gfx_addr = obj->gtt_offset;
552 ring->status_page.page_addr = kmap(obj_priv->pages[0]); 718 ring->status_page.page_addr = kmap(obj->pages[0]);
553 if (ring->status_page.page_addr == NULL) { 719 if (ring->status_page.page_addr == NULL) {
554 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 720 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
555 goto err_unpin; 721 goto err_unpin;
@@ -557,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
557 ring->status_page.obj = obj; 723 ring->status_page.obj = obj;
558 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 724 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
559 725
560 intel_ring_setup_status_page(dev, ring); 726 intel_ring_setup_status_page(ring);
561 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 727 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
562 ring->name, ring->status_page.gfx_addr); 728 ring->name, ring->status_page.gfx_addr);
563 729
@@ -566,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
566err_unpin: 732err_unpin:
567 i915_gem_object_unpin(obj); 733 i915_gem_object_unpin(obj);
568err_unref: 734err_unref:
569 drm_gem_object_unreference(obj); 735 drm_gem_object_unreference(&obj->base);
570err: 736err:
571 return ret; 737 return ret;
572} 738}
@@ -574,9 +740,7 @@ err:
574int intel_init_ring_buffer(struct drm_device *dev, 740int intel_init_ring_buffer(struct drm_device *dev,
575 struct intel_ring_buffer *ring) 741 struct intel_ring_buffer *ring)
576{ 742{
577 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_gem_object *obj;
578 struct drm_i915_gem_object *obj_priv;
579 struct drm_gem_object *obj;
580 int ret; 744 int ret;
581 745
582 ring->dev = dev; 746 ring->dev = dev;
@@ -585,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
585 INIT_LIST_HEAD(&ring->gpu_write_list); 749 INIT_LIST_HEAD(&ring->gpu_write_list);
586 750
587 if (I915_NEED_GFX_HWS(dev)) { 751 if (I915_NEED_GFX_HWS(dev)) {
588 ret = init_status_page(dev, ring); 752 ret = init_status_page(ring);
589 if (ret) 753 if (ret)
590 return ret; 754 return ret;
591 } 755 }
@@ -597,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
597 goto err_hws; 761 goto err_hws;
598 } 762 }
599 763
600 ring->gem_object = obj; 764 ring->obj = obj;
601 765
602 ret = i915_gem_object_pin(obj, PAGE_SIZE); 766 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
603 if (ret) 767 if (ret)
604 goto err_unref; 768 goto err_unref;
605 769
606 obj_priv = to_intel_bo(obj);
607 ring->map.size = ring->size; 770 ring->map.size = ring->size;
608 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 771 ring->map.offset = dev->agp->base + obj->gtt_offset;
609 ring->map.type = 0; 772 ring->map.type = 0;
610 ring->map.flags = 0; 773 ring->map.flags = 0;
611 ring->map.mtrr = 0; 774 ring->map.mtrr = 0;
@@ -618,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
618 } 781 }
619 782
620 ring->virtual_start = ring->map.handle; 783 ring->virtual_start = ring->map.handle;
621 ret = ring->init(dev, ring); 784 ret = ring->init(ring);
622 if (ret) 785 if (ret)
623 goto err_unmap; 786 goto err_unmap;
624 787
625 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 788 return 0;
626 i915_kernel_lost_context(dev);
627 else {
628 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
629 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
630 ring->space = ring->head - (ring->tail + 8);
631 if (ring->space < 0)
632 ring->space += ring->size;
633 }
634 return ret;
635 789
636err_unmap: 790err_unmap:
637 drm_core_ioremapfree(&ring->map, dev); 791 drm_core_ioremapfree(&ring->map, dev);
638err_unpin: 792err_unpin:
639 i915_gem_object_unpin(obj); 793 i915_gem_object_unpin(obj);
640err_unref: 794err_unref:
641 drm_gem_object_unreference(obj); 795 drm_gem_object_unreference(&obj->base);
642 ring->gem_object = NULL; 796 ring->obj = NULL;
643err_hws: 797err_hws:
644 cleanup_status_page(dev, ring); 798 cleanup_status_page(ring);
645 return ret; 799 return ret;
646} 800}
647 801
648void intel_cleanup_ring_buffer(struct drm_device *dev, 802void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
649 struct intel_ring_buffer *ring)
650{ 803{
651 if (ring->gem_object == NULL) 804 struct drm_i915_private *dev_priv;
805 int ret;
806
807 if (ring->obj == NULL)
652 return; 808 return;
653 809
654 drm_core_ioremapfree(&ring->map, dev); 810 /* Disable the ring buffer. The ring must be idle at this point */
811 dev_priv = ring->dev->dev_private;
812 ret = intel_wait_ring_buffer(ring, ring->size - 8);
813 I915_WRITE_CTL(ring, 0);
655 814
656 i915_gem_object_unpin(ring->gem_object); 815 drm_core_ioremapfree(&ring->map, ring->dev);
657 drm_gem_object_unreference(ring->gem_object); 816
658 ring->gem_object = NULL; 817 i915_gem_object_unpin(ring->obj);
818 drm_gem_object_unreference(&ring->obj->base);
819 ring->obj = NULL;
659 820
660 if (ring->cleanup) 821 if (ring->cleanup)
661 ring->cleanup(ring); 822 ring->cleanup(ring);
662 823
663 cleanup_status_page(dev, ring); 824 cleanup_status_page(ring);
664} 825}
665 826
666static int intel_wrap_ring_buffer(struct drm_device *dev, 827static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
667 struct intel_ring_buffer *ring)
668{ 828{
669 unsigned int *virt; 829 unsigned int *virt;
670 int rem; 830 int rem;
671 rem = ring->size - ring->tail; 831 rem = ring->size - ring->tail;
672 832
673 if (ring->space < rem) { 833 if (ring->space < rem) {
674 int ret = intel_wait_ring_buffer(dev, ring, rem); 834 int ret = intel_wait_ring_buffer(ring, rem);
675 if (ret) 835 if (ret)
676 return ret; 836 return ret;
677 } 837 }
@@ -689,11 +849,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
689 return 0; 849 return 0;
690} 850}
691 851
692int intel_wait_ring_buffer(struct drm_device *dev, 852int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
693 struct intel_ring_buffer *ring, int n)
694{ 853{
854 struct drm_device *dev = ring->dev;
855 struct drm_i915_private *dev_priv = dev->dev_private;
695 unsigned long end; 856 unsigned long end;
696 drm_i915_private_t *dev_priv = dev->dev_private;
697 u32 head; 857 u32 head;
698 858
699 trace_i915_ring_wait_begin (dev); 859 trace_i915_ring_wait_begin (dev);
@@ -711,7 +871,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
711 if (ring->space < 0) 871 if (ring->space < 0)
712 ring->space += ring->size; 872 ring->space += ring->size;
713 if (ring->space >= n) { 873 if (ring->space >= n) {
714 trace_i915_ring_wait_end (dev); 874 trace_i915_ring_wait_end(dev);
715 return 0; 875 return 0;
716 } 876 }
717 877
@@ -722,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
722 } 882 }
723 883
724 msleep(1); 884 msleep(1);
885 if (atomic_read(&dev_priv->mm.wedged))
886 return -EAGAIN;
725 } while (!time_after(jiffies, end)); 887 } while (!time_after(jiffies, end));
726 trace_i915_ring_wait_end (dev); 888 trace_i915_ring_wait_end (dev);
727 return -EBUSY; 889 return -EBUSY;
728} 890}
729 891
730void intel_ring_begin(struct drm_device *dev, 892int intel_ring_begin(struct intel_ring_buffer *ring,
731 struct intel_ring_buffer *ring, 893 int num_dwords)
732 int num_dwords)
733{ 894{
734 int n = 4*num_dwords; 895 int n = 4*num_dwords;
735 if (unlikely(ring->tail + n > ring->size)) 896 int ret;
736 intel_wrap_ring_buffer(dev, ring); 897
737 if (unlikely(ring->space < n)) 898 if (unlikely(ring->tail + n > ring->size)) {
738 intel_wait_ring_buffer(dev, ring, n); 899 ret = intel_wrap_ring_buffer(ring);
900 if (unlikely(ret))
901 return ret;
902 }
903
904 if (unlikely(ring->space < n)) {
905 ret = intel_wait_ring_buffer(ring, n);
906 if (unlikely(ret))
907 return ret;
908 }
739 909
740 ring->space -= n; 910 ring->space -= n;
911 return 0;
741} 912}
742 913
743void intel_ring_advance(struct drm_device *dev, 914void intel_ring_advance(struct intel_ring_buffer *ring)
744 struct intel_ring_buffer *ring)
745{ 915{
746 ring->tail &= ring->size - 1; 916 ring->tail &= ring->size - 1;
747 ring->write_tail(dev, ring, ring->tail); 917 ring->write_tail(ring, ring->tail);
748} 918}
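
intel_ring_begin() accounts in dwords while ring->space and ring->tail are in bytes, and intel_ring_advance() relies on the ring size being a power of two so the tail can be wrapped with a mask. A worked example of one reservation (intel_ring_emit() itself lives in intel_ringbuffer.h and is assumed here to write one dword and bump the tail by 4):

/*
 *   intel_ring_begin(ring, 4)   -> n = 4 * 4 = 16 bytes:
 *       wraps first if tail + 16 would run past ring->size,
 *       waits until ring->space >= 16, then ring->space -= 16;
 *   four intel_ring_emit() calls advance ring->tail by 16 bytes;
 *   intel_ring_advance()        -> tail &= ring->size - 1
 *       (valid because size is e.g. 32 * PAGE_SIZE, a power of two),
 *       then ring->write_tail(ring, tail) tells the hardware.
 */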
749 919
750static const struct intel_ring_buffer render_ring = { 920static const struct intel_ring_buffer render_ring = {
@@ -756,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
756 .write_tail = ring_write_tail, 926 .write_tail = ring_write_tail,
757 .flush = render_ring_flush, 927 .flush = render_ring_flush,
758 .add_request = render_ring_add_request, 928 .add_request = render_ring_add_request,
759 .get_seqno = render_ring_get_seqno, 929 .get_seqno = ring_get_seqno,
760 .user_irq_get = render_ring_get_user_irq, 930 .irq_get = render_ring_get_irq,
761 .user_irq_put = render_ring_put_user_irq, 931 .irq_put = render_ring_put_irq,
762 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, 932 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
933 .cleanup = render_ring_cleanup,
763}; 934};
764 935
765/* ring buffer for bit-stream decoder */ 936/* ring buffer for bit-stream decoder */
@@ -769,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
769 .id = RING_BSD, 940 .id = RING_BSD,
770 .mmio_base = BSD_RING_BASE, 941 .mmio_base = BSD_RING_BASE,
771 .size = 32 * PAGE_SIZE, 942 .size = 32 * PAGE_SIZE,
772 .init = init_bsd_ring, 943 .init = init_ring_common,
773 .write_tail = ring_write_tail, 944 .write_tail = ring_write_tail,
774 .flush = bsd_ring_flush, 945 .flush = bsd_ring_flush,
775 .add_request = ring_add_request, 946 .add_request = ring_add_request,
776 .get_seqno = ring_status_page_get_seqno, 947 .get_seqno = ring_get_seqno,
777 .user_irq_get = bsd_ring_get_user_irq, 948 .irq_get = bsd_ring_get_irq,
778 .user_irq_put = bsd_ring_put_user_irq, 949 .irq_put = bsd_ring_put_irq,
779 .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, 950 .dispatch_execbuffer = ring_dispatch_execbuffer,
780}; 951};
781 952
782 953
783static void gen6_bsd_ring_write_tail(struct drm_device *dev, 954static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
784 struct intel_ring_buffer *ring,
785 u32 value) 955 u32 value)
786{ 956{
787 drm_i915_private_t *dev_priv = dev->dev_private; 957 drm_i915_private_t *dev_priv = ring->dev->dev_private;
788 958
789 /* Every tail move must follow the sequence below */ 959 /* Every tail move must follow the sequence below */
790 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 960 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
803 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 973 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
804} 974}
805 975
806static void gen6_ring_flush(struct drm_device *dev, 976static void gen6_ring_flush(struct intel_ring_buffer *ring,
807 struct intel_ring_buffer *ring,
808 u32 invalidate_domains, 977 u32 invalidate_domains,
809 u32 flush_domains) 978 u32 flush_domains)
810{ 979{
811 intel_ring_begin(dev, ring, 4); 980 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
812 intel_ring_emit(dev, ring, MI_FLUSH_DW); 981 return;
813 intel_ring_emit(dev, ring, 0); 982
814 intel_ring_emit(dev, ring, 0); 983 if (intel_ring_begin(ring, 4) == 0) {
815 intel_ring_emit(dev, ring, 0); 984 intel_ring_emit(ring, MI_FLUSH_DW);
816 intel_ring_advance(dev, ring); 985 intel_ring_emit(ring, 0);
986 intel_ring_emit(ring, 0);
987 intel_ring_emit(ring, 0);
988 intel_ring_advance(ring);
989 }
817} 990}
818 991
819static int 992static int
820gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, 993gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
821 struct intel_ring_buffer *ring, 994 u32 offset, u32 len)
822 struct drm_i915_gem_execbuffer2 *exec,
823 struct drm_clip_rect *cliprects,
824 uint64_t exec_offset)
825{ 995{
826 uint32_t exec_start; 996 int ret;
827 997
828 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 998 ret = intel_ring_begin(ring, 2);
999 if (ret)
1000 return ret;
829 1001
830 intel_ring_begin(dev, ring, 2); 1002 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
831 intel_ring_emit(dev, ring,
832 MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
833 /* bit0-7 is the length on GEN6+ */ 1003 /* bit0-7 is the length on GEN6+ */
834 intel_ring_emit(dev, ring, exec_start); 1004 intel_ring_emit(ring, offset);
835 intel_ring_advance(dev, ring); 1005 intel_ring_advance(ring);
836 1006
837 return 0; 1007 return 0;
838} 1008}
839 1009
1010static bool
1011gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1012{
1013 return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1014}
1015
1016static void
1017gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1018{
1019 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1020}
1021
840/* ring buffer for Video Codec for Gen6+ */ 1022/* ring buffer for Video Codec for Gen6+ */
841static const struct intel_ring_buffer gen6_bsd_ring = { 1023static const struct intel_ring_buffer gen6_bsd_ring = {
842 .name = "gen6 bsd ring", 1024 .name = "gen6 bsd ring",
843 .id = RING_BSD, 1025 .id = RING_BSD,
844 .mmio_base = GEN6_BSD_RING_BASE, 1026 .mmio_base = GEN6_BSD_RING_BASE,
845 .size = 32 * PAGE_SIZE, 1027 .size = 32 * PAGE_SIZE,
846 .init = init_bsd_ring, 1028 .init = init_ring_common,
847 .write_tail = gen6_bsd_ring_write_tail, 1029 .write_tail = gen6_bsd_ring_write_tail,
848 .flush = gen6_ring_flush, 1030 .flush = gen6_ring_flush,
849 .add_request = ring_add_request, 1031 .add_request = gen6_add_request,
850 .get_seqno = ring_status_page_get_seqno, 1032 .get_seqno = ring_get_seqno,
851 .user_irq_get = bsd_ring_get_user_irq, 1033 .irq_get = gen6_bsd_ring_get_irq,
852 .user_irq_put = bsd_ring_put_user_irq, 1034 .irq_put = gen6_bsd_ring_put_irq,
853 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1035 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
854}; 1036};
855 1037
856/* Blitter support (SandyBridge+) */ 1038/* Blitter support (SandyBridge+) */
857 1039
858static void 1040static bool
859blt_ring_get_user_irq(struct drm_device *dev, 1041blt_ring_get_irq(struct intel_ring_buffer *ring)
860 struct intel_ring_buffer *ring)
861{ 1042{
862 /* do nothing */ 1043 return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
863} 1044}
1045
864static void 1046static void
865blt_ring_put_user_irq(struct drm_device *dev, 1047blt_ring_put_irq(struct intel_ring_buffer *ring)
866 struct intel_ring_buffer *ring)
867{ 1048{
868 /* do nothing */ 1049 ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
869} 1050}
870 1051
871 1052
@@ -883,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
883 return ring->private; 1064 return ring->private;
884} 1065}
885 1066
886static int blt_ring_init(struct drm_device *dev, 1067static int blt_ring_init(struct intel_ring_buffer *ring)
887 struct intel_ring_buffer *ring)
888{ 1068{
889 if (NEED_BLT_WORKAROUND(dev)) { 1069 if (NEED_BLT_WORKAROUND(ring->dev)) {
890 struct drm_i915_gem_object *obj; 1070 struct drm_i915_gem_object *obj;
891 u32 __iomem *ptr; 1071 u32 *ptr;
892 int ret; 1072 int ret;
893 1073
894 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); 1074 obj = i915_gem_alloc_object(ring->dev, 4096);
895 if (obj == NULL) 1075 if (obj == NULL)
896 return -ENOMEM; 1076 return -ENOMEM;
897 1077
898 ret = i915_gem_object_pin(&obj->base, 4096); 1078 ret = i915_gem_object_pin(obj, 4096, true);
899 if (ret) { 1079 if (ret) {
900 drm_gem_object_unreference(&obj->base); 1080 drm_gem_object_unreference(&obj->base);
901 return ret; 1081 return ret;
902 } 1082 }
903 1083
904 ptr = kmap(obj->pages[0]); 1084 ptr = kmap(obj->pages[0]);
905 iowrite32(MI_BATCH_BUFFER_END, ptr); 1085 *ptr++ = MI_BATCH_BUFFER_END;
906 iowrite32(MI_NOOP, ptr+1); 1086 *ptr++ = MI_NOOP;
907 kunmap(obj->pages[0]); 1087 kunmap(obj->pages[0]);
908 1088
909 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); 1089 ret = i915_gem_object_set_to_gtt_domain(obj, false);
910 if (ret) { 1090 if (ret) {
911 i915_gem_object_unpin(&obj->base); 1091 i915_gem_object_unpin(obj);
912 drm_gem_object_unreference(&obj->base); 1092 drm_gem_object_unreference(&obj->base);
913 return ret; 1093 return ret;
914 } 1094 }
@@ -916,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
916 ring->private = obj; 1096 ring->private = obj;
917 } 1097 }
918 1098
919 return init_ring_common(dev, ring); 1099 return init_ring_common(ring);
920} 1100}
921 1101
922static void blt_ring_begin(struct drm_device *dev, 1102static int blt_ring_begin(struct intel_ring_buffer *ring,
923 struct intel_ring_buffer *ring,
924 int num_dwords) 1103 int num_dwords)
925{ 1104{
926 if (ring->private) { 1105 if (ring->private) {
927 intel_ring_begin(dev, ring, num_dwords+2); 1106 int ret = intel_ring_begin(ring, num_dwords+2);
928 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); 1107 if (ret)
929 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); 1108 return ret;
1109
1110 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1111 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1112
1113 return 0;
930 } else 1114 } else
931 intel_ring_begin(dev, ring, 4); 1115 return intel_ring_begin(ring, 4);
932} 1116}
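
blt_ring_begin() reserves num_dwords + 2 because, when the workaround object exists, every blitter sequence is prefixed by an MI_BATCH_BUFFER_START pointing at the pinned page that blt_ring_init() filled with MI_BATCH_BUFFER_END and MI_NOOP; the two extra dwords carry that prefix. Illustrative layout of one reservation:

/*
 *   dword 0: MI_BATCH_BUFFER_START                 \ workaround prefix
 *   dword 1: to_blt_workaround(ring)->gtt_offset   /  (the +2)
 *   dword 2 .. 2+num_dwords-1: the caller's payload, e.g. the
 *            MI_FLUSH_DW sequence emitted by blt_ring_flush() below
 */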
933 1117
934static void blt_ring_flush(struct drm_device *dev, 1118static void blt_ring_flush(struct intel_ring_buffer *ring,
935 struct intel_ring_buffer *ring,
936 u32 invalidate_domains, 1119 u32 invalidate_domains,
937 u32 flush_domains) 1120 u32 flush_domains)
938{ 1121{
939 blt_ring_begin(dev, ring, 4); 1122 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
940 intel_ring_emit(dev, ring, MI_FLUSH_DW); 1123 return;
941 intel_ring_emit(dev, ring, 0);
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_advance(dev, ring);
945}
946
947static u32
948blt_ring_add_request(struct drm_device *dev,
949 struct intel_ring_buffer *ring,
950 u32 flush_domains)
951{
952 u32 seqno = i915_gem_get_seqno(dev);
953
954 blt_ring_begin(dev, ring, 4);
955 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
956 intel_ring_emit(dev, ring,
957 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
958 intel_ring_emit(dev, ring, seqno);
959 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
960 intel_ring_advance(dev, ring);
961 1124
962 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 1125 if (blt_ring_begin(ring, 4) == 0) {
963 return seqno; 1126 intel_ring_emit(ring, MI_FLUSH_DW);
1127 intel_ring_emit(ring, 0);
1128 intel_ring_emit(ring, 0);
1129 intel_ring_emit(ring, 0);
1130 intel_ring_advance(ring);
1131 }
964} 1132}
965 1133
966static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1134static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
981 .init = blt_ring_init, 1149 .init = blt_ring_init,
982 .write_tail = ring_write_tail, 1150 .write_tail = ring_write_tail,
983 .flush = blt_ring_flush, 1151 .flush = blt_ring_flush,
984 .add_request = blt_ring_add_request, 1152 .add_request = gen6_add_request,
985 .get_seqno = ring_status_page_get_seqno, 1153 .get_seqno = ring_get_seqno,
986 .user_irq_get = blt_ring_get_user_irq, 1154 .irq_get = blt_ring_get_irq,
987 .user_irq_put = blt_ring_put_user_irq, 1155 .irq_put = blt_ring_put_irq,
988 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1156 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
989 .cleanup = blt_ring_cleanup, 1157 .cleanup = blt_ring_cleanup,
990}; 1158};
991 1159
992int intel_init_render_ring_buffer(struct drm_device *dev) 1160int intel_init_render_ring_buffer(struct drm_device *dev)
993{ 1161{
994 drm_i915_private_t *dev_priv = dev->dev_private; 1162 drm_i915_private_t *dev_priv = dev->dev_private;
995 1163 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
996 dev_priv->render_ring = render_ring; 1164
1165 *ring = render_ring;
1166 if (INTEL_INFO(dev)->gen >= 6) {
1167 ring->add_request = gen6_add_request;
1168 } else if (IS_GEN5(dev)) {
1169 ring->add_request = pc_render_add_request;
1170 ring->get_seqno = pc_render_get_seqno;
1171 }
997 1172
998 if (!I915_NEED_GFX_HWS(dev)) { 1173 if (!I915_NEED_GFX_HWS(dev)) {
999 dev_priv->render_ring.status_page.page_addr 1174 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1000 = dev_priv->status_page_dmah->vaddr; 1175 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1001 memset(dev_priv->render_ring.status_page.page_addr,
1002 0, PAGE_SIZE);
1003 } 1176 }
1004 1177
1005 return intel_init_ring_buffer(dev, &dev_priv->render_ring); 1178 return intel_init_ring_buffer(dev, ring);
1006} 1179}
1007 1180
1008int intel_init_bsd_ring_buffer(struct drm_device *dev) 1181int intel_init_bsd_ring_buffer(struct drm_device *dev)
1009{ 1182{
1010 drm_i915_private_t *dev_priv = dev->dev_private; 1183 drm_i915_private_t *dev_priv = dev->dev_private;
1184 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1011 1185
1012 if (IS_GEN6(dev)) 1186 if (IS_GEN6(dev))
1013 dev_priv->bsd_ring = gen6_bsd_ring; 1187 *ring = gen6_bsd_ring;
1014 else 1188 else
1015 dev_priv->bsd_ring = bsd_ring; 1189 *ring = bsd_ring;
1016 1190
1017 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); 1191 return intel_init_ring_buffer(dev, ring);
1018} 1192}
1019 1193
1020int intel_init_blt_ring_buffer(struct drm_device *dev) 1194int intel_init_blt_ring_buffer(struct drm_device *dev)
1021{ 1195{
1022 drm_i915_private_t *dev_priv = dev->dev_private; 1196 drm_i915_private_t *dev_priv = dev->dev_private;
1197 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1023 1198
1024 dev_priv->blt_ring = gen6_blt_ring; 1199 *ring = gen6_blt_ring;
1025 1200
1026 return intel_init_ring_buffer(dev, &dev_priv->blt_ring); 1201 return intel_init_ring_buffer(dev, ring);
1027} 1202}
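
With dev_priv->render_ring/bsd_ring/blt_ring collapsed into the dev_priv->ring[] array indexed by RCS, VCS and BCS, common code can iterate over the rings instead of naming each one. A minimal sketch, assuming three rings and that an uninitialised slot has obj == NULL (the helper name is hypothetical):

static void example_setup_all_status_pages(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->obj == NULL)
			continue;	/* ring not initialised on this GPU */

		intel_ring_setup_status_page(ring);
	}
}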