author	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-27 07:45:26 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-27 18:26:34 -0400
commit	e1f99ce6cac3b6a95551642be5ddb5d9c46bea76 (patch)
tree	fb5152a582fc5b6c190287d9c90d57ca415d6f9d /drivers/gpu/drm
parent	78501eac34f372bfbeb4e1d9de688c13efa916f6 (diff)
drm/i915: Propagate errors from writing to ringbuffer
Preparing the ringbuffer for adding new commands can fail (a timeout whilst waiting for the GPU to catch up and free some space). So check for any potential error before overwriting HEAD with new commands, and propagate that error back to the user where possible.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
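The pattern applied throughout the patch is sketched below: intel_ring_begin() (and the BEGIN_LP_RING() wrapper built on it) now returns 0 on success or a negative errno, such as -EBUSY when waiting for ring space times out, and callers must check that value before emitting any commands. This is a minimal illustration of the calling convention, not an excerpt from the driver; the helper name emit_two_noops() is hypothetical, while the ring functions and MI_NOOP are the ones touched by this patch.

/* Sketch of the new calling convention: reserve space first, bail out on
 * error, and only then emit commands and advance the tail.
 * emit_two_noops() is a hypothetical example, not a function in the driver.
 */
static int emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;			/* e.g. -EBUSY on a ring-wait timeout */

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

Callers that have no way to return an error (void paths such as the legacy breadcrumb and flush helpers) instead skip the whole emit block when BEGIN_LP_RING()/intel_ring_begin() fails, as the hunks below show.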
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c		| 119
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		|  28
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		|   5
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c		|  13
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	|  51
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	|  30
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	| 189
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	|   4
8 files changed, 244 insertions(+), 195 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8a171394a9cf..02daf4e5c8e6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -263,7 +263,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
 {
 	switch (((cmd >> 29) & 0x7)) {
 	case 0x0:
@@ -321,40 +321,27 @@ static int do_validate_cmd(int cmd)
 	return 0;
 }
 
-static int validate_cmd(int cmd)
-{
-	int ret = do_validate_cmd(cmd);
-
-/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
-	return ret;
-}
-
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
-	BEGIN_LP_RING((dwords+1)&~1);
-
 	for (i = 0; i < dwords;) {
-		int cmd, sz;
-
-		cmd = buffer[i];
-
-		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+		int sz = validate_cmd(buffer[i]);
+		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
-
-		OUT_RING(cmd);
-
-		while (++i, --sz) {
-			OUT_RING(buffer[i]);
-		}
+		i += sz;
 	}
 
+	ret = BEGIN_LP_RING((dwords+1)&~1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		OUT_RING(buffer[i]);
 	if (dwords & 1)
 		OUT_RING(0);
 
@@ -368,7 +355,9 @@ i915_emit_box(struct drm_device *dev,
 			  struct drm_clip_rect *boxes,
 			  int i, int DR1, int DR4)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
+	int ret;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -377,22 +366,27 @@ i915_emit_box(struct drm_device *dev,
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		BEGIN_LP_RING(4);
+		ret = BEGIN_LP_RING(4);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
 		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
 		OUT_RING(DR4);
-		ADVANCE_LP_RING();
 	} else {
-		BEGIN_LP_RING(6);
+		ret = BEGIN_LP_RING(6);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO);
 		OUT_RING(DR1);
 		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
 		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
 		OUT_RING(DR4);
 		OUT_RING(0);
-		ADVANCE_LP_RING();
 	}
+	ADVANCE_LP_RING();
 
 	return 0;
 }
@@ -412,12 +406,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -458,8 +453,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				   drm_i915_batchbuffer_t * batch,
 				   struct drm_clip_rect *cliprects)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
-	int i = 0, count;
+	int i, count, ret;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -469,17 +465,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
-
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, cliprects, i,
 					    batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (!IS_I830(dev) && !IS_845G(dev)) {
-			BEGIN_LP_RING(2);
+			ret = BEGIN_LP_RING(2);
+			if (ret)
+				return ret;
+
 			if (INTEL_INFO(dev)->gen >= 4) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 				OUT_RING(batch->start);
@@ -487,26 +485,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		} else {
-			BEGIN_LP_RING(4);
+			ret = BEGIN_LP_RING(4);
+			if (ret)
+				return ret;
+
 			OUT_RING(MI_BATCH_BUFFER);
 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			OUT_RING(batch->start + batch->used - 4);
 			OUT_RING(0);
-			ADVANCE_LP_RING();
 		}
+		ADVANCE_LP_RING();
 	}
 
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		BEGIN_LP_RING(2);
-		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		if (BEGIN_LP_RING(2) == 0) {
+			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+		}
 	}
-	i915_emit_breadcrumb(dev);
 
+	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
@@ -515,6 +516,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
+	int ret;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -526,12 +528,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	i915_kernel_lost_context(dev);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(10);
+	if (ret)
+		return ret;
+
 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(6);
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
 	if (dev_priv->current_page == 0) {
@@ -542,21 +545,21 @@ static int i915_dispatch_flip(struct drm_device * dev)
 		dev_priv->current_page = 0;
 	}
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(2);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
 	OUT_RING(0);
+
 	ADVANCE_LP_RING();
 
 	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 
 	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6fb225f6b2c8..c241468c632e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1216,30 +1216,14 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
 							     I915_DEBUG_WRITE))
 
-#define I915_VERBOSE 0
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(&dev_priv->render_ring, (n))
 
-#define BEGIN_LP_RING(n) do { \
-	drm_i915_private_t *dev_priv__ = dev->dev_private; \
-	if (I915_VERBOSE) \
-		DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
-	intel_ring_begin(&dev_priv__->render_ring, (n)); \
-} while (0)
-
-
-#define OUT_RING(x) do { \
-	drm_i915_private_t *dev_priv__ = dev->dev_private; \
-	if (I915_VERBOSE) \
-		DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
-	intel_ring_emit(&dev_priv__->render_ring, x); \
-} while (0)
+#define OUT_RING(x) \
+	intel_ring_emit(&dev_priv->render_ring, x)
 
-#define ADVANCE_LP_RING() do { \
-	drm_i915_private_t *dev_priv__ = dev->dev_private; \
-	if (I915_VERBOSE) \
-		DRM_DEBUG("ADVANCE_LP_RING %x\n", \
-			  dev_priv__->render_ring.tail); \
-	intel_ring_advance(&dev_priv__->render_ring); \
-} while(0)
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(&dev_priv->render_ring)
 
 /**
  * Reads a dword out of the status page, which is written to from the command
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97bf7c87d857..00e901483ba5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3826,7 +3826,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		else
 			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
-		intel_ring_begin(ring, 2);
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			goto err;
+
 		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 852a2d848bf4..8acdd6d857d3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1101,12 +1101,13 @@ static int i915_emit_irq(struct drm_device * dev)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
 	return dev_priv->counter;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..eb4c725e3069 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5090,22 +5090,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (ret)
 		goto cleanup_objs;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane,
-		   &to_intel_bo(work->old_fb_obj)->pending_flip);
-
-	work->pending_flip_obj = obj;
-	obj_priv = to_intel_bo(obj);
-
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;
 
 		/* Can't queue multiple flips, so wait for the previous
 		 * one to finish before executing the next.
 		 */
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret)
+			goto cleanup_objs;
+
 		if (intel_crtc->plane)
 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 		else
@@ -5115,13 +5109,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		ADVANCE_LP_RING();
 	}
 
+	work->pending_flip_obj = obj;
+	obj_priv = to_intel_bo(obj);
+
 	work->enable_stall_check = true;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	BEGIN_LP_RING(4);
-	switch(INTEL_INFO(dev)->gen) {
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto cleanup_objs;
+
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane,
+		   &to_intel_bo(work->old_fb_obj)->pending_flip);
+
+	switch (INTEL_INFO(dev)->gen) {
 	case 2:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -5850,16 +5856,17 @@ void intel_init_clock_gating(struct drm_device *dev)
 		struct drm_i915_gem_object *obj_priv;
 		obj_priv = to_intel_bo(dev_priv->renderctx);
 		if (obj_priv) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_SET_CONTEXT);
-			OUT_RING(obj_priv->gtt_offset |
-					MI_MM_SPACE_GTT |
-					MI_SAVE_EXT_STATE_EN |
-					MI_RESTORE_EXT_STATE_EN |
-					MI_RESTORE_INHIBIT);
-			OUT_RING(MI_NOOP);
-			OUT_RING(MI_FLUSH);
-			ADVANCE_LP_RING();
+			if (BEGIN_LP_RING(4) == 0) {
+				OUT_RING(MI_SET_CONTEXT);
+				OUT_RING(obj_priv->gtt_offset |
+					 MI_MM_SPACE_GTT |
+					 MI_SAVE_EXT_STATE_EN |
+					 MI_RESTORE_EXT_STATE_EN |
+					 MI_RESTORE_INHIBIT);
+				OUT_RING(MI_NOOP);
+				OUT_RING(MI_FLUSH);
+				ADVANCE_LP_RING();
+			}
 		}
 	} else
 		DRM_DEBUG_KMS("Failed to allocate render context."
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..78fa6a249964 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -289,6 +289,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -308,7 +309,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +338,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -347,7 +354,11 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
@@ -389,8 +400,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 				     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -404,7 +417,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
@@ -467,7 +484,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (request == NULL)
 		return -ENOMEM;
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d6eba661105f..6fe42c1f4ea9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -112,10 +112,11 @@ render_ring_flush(struct intel_ring_buffer *ring,
 #if WATCH_EXEC
 	DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(ring, 2);
-		intel_ring_emit(ring, cmd);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		if (intel_ring_begin(ring, 2) == 0) {
+			intel_ring_emit(ring, cmd);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
 	}
 }
 
@@ -244,16 +245,17 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 	seqno = i915_gem_get_seqno(dev);
 
 	if (IS_GEN6(dev)) {
-		intel_ring_begin(ring, 6);
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
-		intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-				PIPE_CONTROL_NOTIFY);
-		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
+		if (intel_ring_begin(ring, 6) == 0) {
+			intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
+			intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
+					PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
+					PIPE_CONTROL_NOTIFY);
+			intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+			intel_ring_emit(ring, seqno);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, 0);
+			intel_ring_advance(ring);
+		}
 	} else if (HAS_PIPE_CONTROL(dev)) {
 		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -262,38 +264,40 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 		 * PIPE_NOTIFY buffers out to memory before requesting
 		 * an interrupt.
 		 */
-		intel_ring_begin(ring, 32);
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-				PIPE_CONTROL_NOTIFY);
-		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
+		if (intel_ring_begin(ring, 32) == 0) {
+			intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+					PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+			intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+			intel_ring_emit(ring, seqno);
+			intel_ring_emit(ring, 0);
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			scratch_addr += 128; /* write to separate cachelines */
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			scratch_addr += 128;
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			scratch_addr += 128;
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			scratch_addr += 128;
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			scratch_addr += 128;
+			PIPE_CONTROL_FLUSH(ring, scratch_addr);
+			intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+					PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+					PIPE_CONTROL_NOTIFY);
+			intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+			intel_ring_emit(ring, seqno);
+			intel_ring_emit(ring, 0);
+			intel_ring_advance(ring);
+		}
 	} else {
-		intel_ring_begin(ring, 4);
-		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		intel_ring_emit(ring, seqno);
-
-		intel_ring_emit(ring, MI_USER_INTERRUPT);
-		intel_ring_advance(ring);
+		if (intel_ring_begin(ring, 4) == 0) {
+			intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+			intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+			intel_ring_emit(ring, seqno);
+
+			intel_ring_emit(ring, MI_USER_INTERRUPT);
+			intel_ring_advance(ring);
+		}
 	}
 	return seqno;
 }
@@ -359,10 +363,11 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
-	intel_ring_begin(ring, 2);
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	if (intel_ring_begin(ring, 2) == 0) {
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	}
 }
 
 static u32
@@ -373,12 +378,13 @@ ring_add_request(struct intel_ring_buffer *ring,
 
 	seqno = i915_gem_get_seqno(ring->dev);
 
-	intel_ring_begin(ring, 4);
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	if (intel_ring_begin(ring, 4) == 0) {
+		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		intel_ring_emit(ring, seqno);
+		intel_ring_emit(ring, MI_USER_INTERRUPT);
+		intel_ring_advance(ring);
+	}
 
 	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
 
@@ -409,10 +415,14 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			uint64_t exec_offset)
 {
 	uint32_t exec_start;
+	int ret;
 
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-	intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(2 << 6) |
@@ -432,8 +442,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
-	int i = 0, count;
 	uint32_t exec_start, exec_len;
+	int i, count, ret;
 
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
@@ -441,23 +451,28 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
 	count = nbox ? nbox : 1;
-
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, cliprects, i,
 					    exec->DR1, exec->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(ring, 4);
+			ret = intel_ring_begin(ring, 4);
+			if (ret)
+				return ret;
+
 			intel_ring_emit(ring, MI_BATCH_BUFFER);
 			intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE);
 			intel_ring_emit(ring, exec_start + exec_len - 4);
 			intel_ring_emit(ring, 0);
 		} else {
-			intel_ring_begin(ring, 2);
+			ret = intel_ring_begin(ring, 2);
+			if (ret)
+				return ret;
+
 			if (INTEL_INFO(dev)->gen >= 4) {
 				intel_ring_emit(ring,
 						MI_BATCH_BUFFER_START | (2 << 6)
@@ -474,12 +489,13 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	}
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(ring, 2);
-		intel_ring_emit(ring, MI_FLUSH |
-				MI_NO_WRITE_FLUSH |
-				MI_INVALIDATE_ISP );
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		if (intel_ring_begin(ring, 2) == 0) {
+			intel_ring_emit(ring, MI_FLUSH |
+					MI_NO_WRITE_FLUSH |
+					MI_INVALIDATE_ISP );
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
 	}
 	/* XXX breadcrumb */
 
@@ -693,18 +709,26 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	return -EBUSY;
 }
 
-void intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_ring_buffer *ring,
 		      int num_dwords)
 {
 	int n = 4*num_dwords;
+	int ret;
 
-	if (unlikely(ring->tail + n > ring->size))
-		intel_wrap_ring_buffer(ring);
+	if (unlikely(ring->tail + n > ring->size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
 
-	if (unlikely(ring->space < n))
-		intel_wait_ring_buffer(ring, n);
+	if (unlikely(ring->space < n)) {
+		ret = intel_wait_ring_buffer(ring, n);
+		if (unlikely(ret))
+			return ret;
+	}
 
 	ring->space -= n;
+	return 0;
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -772,12 +796,13 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
-	intel_ring_begin(ring, 4);
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	if (intel_ring_begin(ring, 4) == 0) {
+		intel_ring_emit(ring, MI_FLUSH_DW);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_advance(ring);
+	}
 }
 
 static int
@@ -787,10 +812,14 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			uint64_t exec_offset)
 {
 	uint32_t exec_start;
+	int ret;
 
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-	intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, exec_start);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba4a393e6d16..35ece2b87b02 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,8 +100,8 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 }
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
 
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)