diff options
author | Zou Nan hai <nanhai.zou@intel.com> | 2010-05-20 21:08:57 -0400 |
---|---|---|
committer | Eric Anholt <eric@anholt.net> | 2010-05-26 16:46:58 -0400 |
commit | d1b851fc0d105caa6b6e3e7c92d2987dfb52cbe0 (patch) | |
tree | 19f9c7d935725c513cd56ed14191f2827afe2f38 /drivers/gpu/drm/i915/intel_ringbuffer.c | |
parent | 852835f343146a82a528c3b712b373661d4fa17a (diff) |
drm/i915: implement BSD ring buffer V2
The BSD (bit stream decoder) ring is used for accessing the BSD engine
which decodes video bitstreams for H.264 and VC1 on G45+. It is
asynchronous with the render ring and has access to separate parts of
the GPU from it, though the render cache is coherent between the two.
Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 153 |
1 file changed, 153 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 5715c4d8cce9..f6b84fe8099a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -340,6 +340,119 @@ static void render_setup_status_page(struct drm_device *dev, | |||
340 | 340 | ||
341 | } | 341 | } |
342 | 342 | ||
/*
 * Flush the BSD ring: emit an MI_FLUSH, padded to an even dword count
 * with an MI_NOOP, then kick the tail so the hardware sees it.
 *
 * NOTE(review): intel_ring_begin() is passed 8 here but only 2 dwords
 * are emitted — presumably the third argument is in bytes; confirm
 * against intel_ring_begin()'s definition, since bsd_ring_add_request()
 * elsewhere in this file passes 4 while emitting 4 dwords.
 */
void
bsd_ring_flush(struct drm_device *dev,
	       struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	intel_ring_begin(dev, ring, 8);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}
354 | |||
355 | static inline unsigned int bsd_ring_get_head(struct drm_device *dev, | ||
356 | struct intel_ring_buffer *ring) | ||
357 | { | ||
358 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
359 | return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; | ||
360 | } | ||
361 | |||
362 | static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, | ||
363 | struct intel_ring_buffer *ring) | ||
364 | { | ||
365 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
366 | return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; | ||
367 | } | ||
368 | |||
369 | static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, | ||
370 | struct intel_ring_buffer *ring) | ||
371 | { | ||
372 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
373 | return I915_READ(BSD_RING_ACTHD); | ||
374 | } | ||
375 | |||
/*
 * Publish the software tail to the hardware: writing BSD_RING_TAIL
 * makes the commands emitted since the last advance visible to the
 * BSD engine.
 */
static inline void bsd_ring_advance_ring(struct drm_device *dev,
					 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_RING_TAIL, ring->tail);
}
382 | |||
/*
 * Initialize the BSD ring.  The BSD ring needs no engine-specific
 * setup beyond the common ring bring-up, so this just delegates to
 * init_ring_common().
 */
static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}
388 | |||
/*
 * Emit a request on the BSD ring: allocate the next seqno, store it
 * into the hardware status page (MI_STORE_DWORD_INDEX at
 * I915_GEM_HWS_INDEX), and raise a user interrupt so waiters can be
 * woken.  Returns the seqno assigned to this request.
 *
 * flush_domains is accepted for interface symmetry with the render
 * ring but is not used here.
 *
 * NOTE(review): intel_ring_begin() is passed 4 while 4 dwords are
 * emitted, whereas bsd_ring_flush() passes 8 for 2 dwords — the units
 * of intel_ring_begin()'s third argument (bytes vs dwords) should be
 * confirmed; one of the two call sites looks inconsistent.
 */
static u32
bsd_ring_add_request(struct drm_device *dev,
		     struct intel_ring_buffer *ring,
		     struct drm_file *file_priv,
		     u32 flush_domains)
{
	u32 seqno;
	seqno = intel_ring_get_seqno(dev, ring);
	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}
409 | |||
/*
 * Point the BSD engine's hardware status page at the GTT address of
 * the page allocated for this ring.
 */
static void bsd_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	/* Posting read to flush the write before the ring starts. */
	I915_READ(BSD_HWS_PGA);
}
417 | |||
/*
 * Enable user interrupts for the BSD ring.  Intentionally a stub:
 * BSD interrupt delivery is not wired up in this version, so waiters
 * rely on polling the status page instead.
 */
static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}
/*
 * Disable user interrupts for the BSD ring.  Intentionally a stub,
 * matching bsd_ring_get_user_irq().
 */
static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}
430 | |||
431 | static u32 | ||
432 | bsd_ring_get_gem_seqno(struct drm_device *dev, | ||
433 | struct intel_ring_buffer *ring) | ||
434 | { | ||
435 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | ||
436 | } | ||
437 | |||
/*
 * Dispatch a batch buffer on the BSD ring: emit an MI_BATCH_BUFFER_START
 * pointing at the batch's GTT offset.  Always returns 0.
 *
 * cliprects is accepted for interface symmetry with the render ring but
 * is ignored — no cliprect processing is done for BSD batches.
 *
 * NOTE(review): (2 << 6) is presumably a length/encoding field of
 * MI_BATCH_BUFFER_START and MI_BATCH_NON_SECURE_I965 selects the
 * non-secure GTT-addressed variant — confirm against the PRM's MI
 * command encodings.
 */
static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				 struct intel_ring_buffer *ring,
				 struct drm_i915_gem_execbuffer2 *exec,
				 struct drm_clip_rect *cliprects,
				 uint64_t exec_offset)
{
	uint32_t exec_start;
	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);
	return 0;
}
454 | |||
455 | |||
343 | static int | 456 | static int |
344 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 457 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, |
345 | struct intel_ring_buffer *ring, | 458 | struct intel_ring_buffer *ring, |
@@ -588,6 +701,7 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
588 | if (master_priv->sarea_priv) | 701 | if (master_priv->sarea_priv) |
589 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 702 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
590 | } | 703 | } |
704 | |||
591 | yield(); | 705 | yield(); |
592 | } while (!time_after(jiffies, end)); | 706 | } while (!time_after(jiffies, end)); |
593 | trace_i915_ring_wait_end (dev); | 707 | trace_i915_ring_wait_end (dev); |
@@ -682,3 +796,42 @@ struct intel_ring_buffer render_ring = { | |||
682 | .status_page = {NULL, 0, NULL}, | 796 | .status_page = {NULL, 0, NULL}, |
683 | .map = {0,} | 797 | .map = {0,} |
684 | }; | 798 | }; |
799 | |||
/* ring buffer for bit-stream decoder */

/*
 * Static descriptor for the BSD (video decode) ring.  Mirrors the
 * render_ring definition above: register offsets and vfuncs are
 * BSD-specific, runtime state starts zeroed and is filled in when the
 * ring is initialized.
 */
struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	/* MMIO registers controlling this ring */
	.regs			= {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	/* Selected by userspace via the I915_EXEC_BSD execbuffer2 flag. */
	.ring_flag		= I915_EXEC_BSD,
	.size			= 32 * PAGE_SIZE,
	.alignment		= PAGE_SIZE,
	/* Runtime state — populated at init time. */
	.virtual_start		= NULL,
	.dev			= NULL,
	.gem_object		= NULL,
	.head			= 0,
	.tail			= 0,
	.space			= 0,
	/* Start at 1; presumably seqno 0 is treated as invalid — matches
	 * the render ring's initializer. */
	.next_seqno		= 1,
	.user_irq_refcount	= 0,
	.irq_gem_seqno		= 0,
	.waiting_gem_seqno	= 0,
	/* BSD-specific ring operations. */
	.setup_status_page	= bsd_setup_status_page,
	.init			= init_bsd_ring,
	.get_head		= bsd_ring_get_head,
	.get_tail		= bsd_ring_get_tail,
	.get_active_head	= bsd_ring_get_active_head,
	.advance_ring		= bsd_ring_advance_ring,
	.flush			= bsd_ring_flush,
	.add_request		= bsd_ring_add_request,
	.get_gem_seqno		= bsd_ring_get_gem_seqno,
	.user_irq_get		= bsd_ring_get_user_irq,
	.user_irq_put		= bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page		= {NULL, 0, NULL},
	.map			= {0,}
};