diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2010-11-30 09:10:25 -0500 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2010-11-30 09:17:51 -0500 |
commit | c4e7a4146798ce22c229dd21ed31f59f07c4119e (patch) | |
tree | 4187f46f837822ac1e3664e30a9b3b916bc53983 /drivers | |
parent | 70eac33e7ac370dc137cabff7a4ba3094ca25a8c (diff) |
drm/i915/ringbuffer: Handle cliprects in the caller
This makes the various rings more consistent by removing the anomalous
handling of the rendering ring execbuffer dispatch.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/i915/i915_dma.c | 22 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 30 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 86 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.h | 4 |
5 files changed, 67 insertions, 79 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7960fd63ecb..9a22da9b208 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -352,16 +352,16 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
352 | 352 | ||
353 | int | 353 | int |
354 | i915_emit_box(struct drm_device *dev, | 354 | i915_emit_box(struct drm_device *dev, |
355 | struct drm_clip_rect *boxes, | 355 | struct drm_clip_rect *box, |
356 | int i, int DR1, int DR4) | 356 | int DR1, int DR4) |
357 | { | 357 | { |
358 | struct drm_i915_private *dev_priv = dev->dev_private; | 358 | struct drm_i915_private *dev_priv = dev->dev_private; |
359 | struct drm_clip_rect box = boxes[i]; | ||
360 | int ret; | 359 | int ret; |
361 | 360 | ||
362 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 361 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || |
362 | box->y2 <= 0 || box->x2 <= 0) { | ||
363 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 363 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
364 | box.x1, box.y1, box.x2, box.y2); | 364 | box->x1, box->y1, box->x2, box->y2); |
365 | return -EINVAL; | 365 | return -EINVAL; |
366 | } | 366 | } |
367 | 367 | ||
@@ -371,8 +371,8 @@ i915_emit_box(struct drm_device *dev, | |||
371 | return ret; | 371 | return ret; |
372 | 372 | ||
373 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | 373 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); |
374 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 374 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
375 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 375 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
376 | OUT_RING(DR4); | 376 | OUT_RING(DR4); |
377 | } else { | 377 | } else { |
378 | ret = BEGIN_LP_RING(6); | 378 | ret = BEGIN_LP_RING(6); |
@@ -381,8 +381,8 @@ i915_emit_box(struct drm_device *dev, | |||
381 | 381 | ||
382 | OUT_RING(GFX_OP_DRAWRECT_INFO); | 382 | OUT_RING(GFX_OP_DRAWRECT_INFO); |
383 | OUT_RING(DR1); | 383 | OUT_RING(DR1); |
384 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 384 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
385 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 385 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
386 | OUT_RING(DR4); | 386 | OUT_RING(DR4); |
387 | OUT_RING(0); | 387 | OUT_RING(0); |
388 | } | 388 | } |
@@ -434,7 +434,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
434 | 434 | ||
435 | for (i = 0; i < count; i++) { | 435 | for (i = 0; i < count; i++) { |
436 | if (i < nbox) { | 436 | if (i < nbox) { |
437 | ret = i915_emit_box(dev, cliprects, i, | 437 | ret = i915_emit_box(dev, &cliprects[i], |
438 | cmd->DR1, cmd->DR4); | 438 | cmd->DR1, cmd->DR4); |
439 | if (ret) | 439 | if (ret) |
440 | return ret; | 440 | return ret; |
@@ -467,7 +467,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
467 | count = nbox ? nbox : 1; | 467 | count = nbox ? nbox : 1; |
468 | for (i = 0; i < count; i++) { | 468 | for (i = 0; i < count; i++) { |
469 | if (i < nbox) { | 469 | if (i < nbox) { |
470 | ret = i915_emit_box(dev, cliprects, i, | 470 | ret = i915_emit_box(dev, &cliprects[i], |
471 | batch->DR1, batch->DR4); | 471 | batch->DR1, batch->DR4); |
472 | if (ret) | 472 | if (ret) |
473 | return ret; | 473 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e7c4108c94c..590d8f2d095 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -966,8 +966,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); | |||
966 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | 966 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
967 | unsigned long arg); | 967 | unsigned long arg); |
968 | extern int i915_emit_box(struct drm_device *dev, | 968 | extern int i915_emit_box(struct drm_device *dev, |
969 | struct drm_clip_rect *boxes, | 969 | struct drm_clip_rect *box, |
970 | int i, int DR1, int DR4); | 970 | int DR1, int DR4); |
971 | extern int i915_reset(struct drm_device *dev, u8 flags); | 971 | extern int i915_reset(struct drm_device *dev, u8 flags); |
972 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | 972 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
973 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | 973 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 66c898c8716..f57536a70a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -825,6 +825,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
825 | struct drm_i915_gem_object *batch_obj; | 825 | struct drm_i915_gem_object *batch_obj; |
826 | struct drm_clip_rect *cliprects = NULL; | 826 | struct drm_clip_rect *cliprects = NULL; |
827 | struct intel_ring_buffer *ring; | 827 | struct intel_ring_buffer *ring; |
828 | u32 exec_start, exec_len; | ||
828 | int ret, i; | 829 | int ret, i; |
829 | 830 | ||
830 | if (!i915_gem_check_execbuffer(args)) { | 831 | if (!i915_gem_check_execbuffer(args)) { |
@@ -871,6 +872,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
871 | } | 872 | } |
872 | 873 | ||
873 | if (args->num_cliprects != 0) { | 874 | if (args->num_cliprects != 0) { |
875 | if (ring != &dev_priv->render_ring) { | ||
876 | DRM_ERROR("clip rectangles are only valid with the render ring\n"); | ||
877 | return -EINVAL; | ||
878 | } | ||
879 | |||
874 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), | 880 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), |
875 | GFP_KERNEL); | 881 | GFP_KERNEL); |
876 | if (cliprects == NULL) { | 882 | if (cliprects == NULL) { |
@@ -959,11 +965,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
959 | if (ret) | 965 | if (ret) |
960 | goto err; | 966 | goto err; |
961 | 967 | ||
962 | ret = ring->dispatch_execbuffer(ring, | 968 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
963 | args, cliprects, | 969 | exec_len = args->batch_len; |
964 | batch_obj->gtt_offset); | 970 | if (cliprects) { |
965 | if (ret) | 971 | for (i = 0; i < args->num_cliprects; i++) { |
966 | goto err; | 972 | ret = i915_emit_box(dev, &cliprects[i], |
973 | args->DR1, args->DR4); | ||
974 | if (ret) | ||
975 | goto err; | ||
976 | |||
977 | ret = ring->dispatch_execbuffer(ring, | ||
978 | exec_start, exec_len); | ||
979 | if (ret) | ||
980 | goto err; | ||
981 | } | ||
982 | } else { | ||
983 | ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); | ||
984 | if (ret) | ||
985 | goto err; | ||
986 | } | ||
967 | 987 | ||
968 | i915_gem_execbuffer_move_to_active(&objects, ring); | 988 | i915_gem_execbuffer_move_to_active(&objects, ring); |
969 | i915_gem_execbuffer_retire_commands(dev, file, ring); | 989 | i915_gem_execbuffer_retire_commands(dev, file, ring); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7fc55a80be2..21871b0766e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -508,25 +508,18 @@ ring_status_page_get_seqno(struct intel_ring_buffer *ring) | |||
508 | } | 508 | } |
509 | 509 | ||
510 | static int | 510 | static int |
511 | ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 511 | ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
512 | struct drm_i915_gem_execbuffer2 *exec, | ||
513 | struct drm_clip_rect *cliprects, | ||
514 | uint64_t exec_offset) | ||
515 | { | 512 | { |
516 | uint32_t exec_start; | ||
517 | int ret; | 513 | int ret; |
518 | 514 | ||
519 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
520 | |||
521 | ret = intel_ring_begin(ring, 2); | 515 | ret = intel_ring_begin(ring, 2); |
522 | if (ret) | 516 | if (ret) |
523 | return ret; | 517 | return ret; |
524 | 518 | ||
525 | intel_ring_emit(ring, | 519 | intel_ring_emit(ring, |
526 | MI_BATCH_BUFFER_START | | 520 | MI_BATCH_BUFFER_START | (2 << 6) | |
527 | (2 << 6) | | ||
528 | MI_BATCH_NON_SECURE_I965); | 521 | MI_BATCH_NON_SECURE_I965); |
529 | intel_ring_emit(ring, exec_start); | 522 | intel_ring_emit(ring, offset); |
530 | intel_ring_advance(ring); | 523 | intel_ring_advance(ring); |
531 | 524 | ||
532 | return 0; | 525 | return 0; |
@@ -534,58 +527,40 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
534 | 527 | ||
535 | static int | 528 | static int |
536 | render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 529 | render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
537 | struct drm_i915_gem_execbuffer2 *exec, | 530 | u32 offset, u32 len) |
538 | struct drm_clip_rect *cliprects, | ||
539 | uint64_t exec_offset) | ||
540 | { | 531 | { |
541 | struct drm_device *dev = ring->dev; | 532 | struct drm_device *dev = ring->dev; |
542 | drm_i915_private_t *dev_priv = dev->dev_private; | 533 | drm_i915_private_t *dev_priv = dev->dev_private; |
543 | int nbox = exec->num_cliprects; | 534 | int ret; |
544 | uint32_t exec_start, exec_len; | ||
545 | int i, count, ret; | ||
546 | |||
547 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
548 | exec_len = (uint32_t) exec->batch_len; | ||
549 | 535 | ||
550 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | 536 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); |
551 | 537 | ||
552 | count = nbox ? nbox : 1; | 538 | if (IS_I830(dev) || IS_845G(dev)) { |
553 | for (i = 0; i < count; i++) { | 539 | ret = intel_ring_begin(ring, 4); |
554 | if (i < nbox) { | 540 | if (ret) |
555 | ret = i915_emit_box(dev, cliprects, i, | 541 | return ret; |
556 | exec->DR1, exec->DR4); | ||
557 | if (ret) | ||
558 | return ret; | ||
559 | } | ||
560 | 542 | ||
561 | if (IS_I830(dev) || IS_845G(dev)) { | 543 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
562 | ret = intel_ring_begin(ring, 4); | 544 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
563 | if (ret) | 545 | intel_ring_emit(ring, offset + len - 8); |
564 | return ret; | 546 | intel_ring_emit(ring, 0); |
547 | } else { | ||
548 | ret = intel_ring_begin(ring, 2); | ||
549 | if (ret) | ||
550 | return ret; | ||
565 | 551 | ||
566 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 552 | if (INTEL_INFO(dev)->gen >= 4) { |
567 | intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); | 553 | intel_ring_emit(ring, |
568 | intel_ring_emit(ring, exec_start + exec_len - 4); | 554 | MI_BATCH_BUFFER_START | (2 << 6) | |
569 | intel_ring_emit(ring, 0); | 555 | MI_BATCH_NON_SECURE_I965); |
556 | intel_ring_emit(ring, offset); | ||
570 | } else { | 557 | } else { |
571 | ret = intel_ring_begin(ring, 2); | 558 | intel_ring_emit(ring, |
572 | if (ret) | 559 | MI_BATCH_BUFFER_START | (2 << 6)); |
573 | return ret; | 560 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
574 | |||
575 | if (INTEL_INFO(dev)->gen >= 4) { | ||
576 | intel_ring_emit(ring, | ||
577 | MI_BATCH_BUFFER_START | (2 << 6) | ||
578 | | MI_BATCH_NON_SECURE_I965); | ||
579 | intel_ring_emit(ring, exec_start); | ||
580 | } else { | ||
581 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | ||
582 | | (2 << 6)); | ||
583 | intel_ring_emit(ring, exec_start | | ||
584 | MI_BATCH_NON_SECURE); | ||
585 | } | ||
586 | } | 561 | } |
587 | intel_ring_advance(ring); | ||
588 | } | 562 | } |
563 | intel_ring_advance(ring); | ||
589 | 564 | ||
590 | return 0; | 565 | return 0; |
591 | } | 566 | } |
@@ -904,22 +879,17 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, | |||
904 | 879 | ||
905 | static int | 880 | static int |
906 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 881 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
907 | struct drm_i915_gem_execbuffer2 *exec, | 882 | u32 offset, u32 len) |
908 | struct drm_clip_rect *cliprects, | ||
909 | uint64_t exec_offset) | ||
910 | { | 883 | { |
911 | uint32_t exec_start; | ||
912 | int ret; | 884 | int ret; |
913 | 885 | ||
914 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
915 | |||
916 | ret = intel_ring_begin(ring, 2); | 886 | ret = intel_ring_begin(ring, 2); |
917 | if (ret) | 887 | if (ret) |
918 | return ret; | 888 | return ret; |
919 | 889 | ||
920 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); | 890 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
921 | /* bit0-7 is the length on GEN6+ */ | 891 | /* bit0-7 is the length on GEN6+ */ |
922 | intel_ring_emit(ring, exec_start); | 892 | intel_ring_emit(ring, offset); |
923 | intel_ring_advance(ring); | 893 | intel_ring_advance(ring); |
924 | 894 | ||
925 | return 0; | 895 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 1747e329ee9..8e352677792 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -57,9 +57,7 @@ struct intel_ring_buffer { | |||
57 | u32 *seqno); | 57 | u32 *seqno); |
58 | u32 (*get_seqno)(struct intel_ring_buffer *ring); | 58 | u32 (*get_seqno)(struct intel_ring_buffer *ring); |
59 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | 59 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
60 | struct drm_i915_gem_execbuffer2 *exec, | 60 | u32 offset, u32 length); |
61 | struct drm_clip_rect *cliprects, | ||
62 | uint64_t exec_offset); | ||
63 | void (*cleanup)(struct intel_ring_buffer *ring); | 61 | void (*cleanup)(struct intel_ring_buffer *ring); |
64 | 62 | ||
65 | /** | 63 | /** |