author     Chris Wilson <chris@chris-wilson.co.uk>   2010-10-27 07:18:21 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>   2010-10-27 07:18:21 -0400
commit     78501eac34f372bfbeb4e1d9de688c13efa916f6 (patch)
tree       a490359ac69c394149362e6571a37189ee264739
parent     dd2b379f071424f36f9f90ff83cb4ad058c7b6ed (diff)
drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface
The ringbuffer keeps a pointer to the parent device, so we can use that
instead of passing around the pointer on the stack.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
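The conversion is mechanical throughout: each vfunc loses its leading
struct drm_device * parameter, and implementations that still need the
device recover it from the ring's dev back-pointer. As a condensed
before/after sketch of the pattern (taken from the patch's own
ring_write_tail below; I915_WRITE_TAIL is an existing driver macro that
expects a local dev_priv in scope):

	/* Before: dev rode along on the stack with every call. */
	static void ring_write_tail(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    u32 value)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		I915_WRITE_TAIL(ring, value);
	}

	/* After: the ring alone identifies the hardware; the device
	 * is reachable through the ring's back-pointer when needed. */
	static void ring_write_tail(struct intel_ring_buffer *ring,
				    u32 value)
	{
		drm_i915_private_t *dev_priv = ring->dev->dev_private;
		I915_WRITE_TAIL(ring, value);
	}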
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c      |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  55
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 378
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |  76
8 files changed, 247 insertions(+), 298 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c7..c1b04b6056da 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -296,7 +296,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(&dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -356,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(&dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..8a171394a9cf 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -131,9 +131,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+	intel_cleanup_ring_buffer(&dev_priv->render_ring);
+	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -221,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 			 ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		intel_ring_setup_status_page(dev, ring);
+		intel_ring_setup_status_page(ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -567,7 +567,7 @@ static int i915_quiescent(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+	return intel_wait_ring_buffer(&dev_priv->render_ring,
 				      dev_priv->render_ring.size - 8);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..82c19ab3e1e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -473,7 +473,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 			!dev_priv->mm.suspended) {
 		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-		ring->init(dev, ring);
+		ring->init(ring);
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..6fb225f6b2c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1219,10 +1219,10 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 #define I915_VERBOSE 0
 
 #define BEGIN_LP_RING(n)  do {					\
 	drm_i915_private_t *dev_priv__ = dev->dev_private;	\
 	if (I915_VERBOSE)					\
 		DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n));	\
-	intel_ring_begin(dev, &dev_priv__->render_ring, (n));	\
+	intel_ring_begin(&dev_priv__->render_ring, (n));	\
 } while (0)
 
 
@@ -1230,7 +1230,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 	drm_i915_private_t *dev_priv__ = dev->dev_private;	\
 	if (I915_VERBOSE)					\
 		DRM_DEBUG(" OUT_RING %x\n", (int)(x));		\
-	intel_ring_emit(dev, &dev_priv__->render_ring, x);	\
+	intel_ring_emit(&dev_priv__->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {					\
@@ -1238,7 +1238,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 	if (I915_VERBOSE)					\
 		DRM_DEBUG("ADVANCE_LP_RING %x\n",		\
 				dev_priv__->render_ring.tail);	\
-	intel_ring_advance(dev, &dev_priv__->render_ring);	\
+	intel_ring_advance(&dev_priv__->render_ring);		\
 } while(0)
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..97bf7c87d857 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1703,7 +1703,7 @@ i915_add_request(struct drm_device *dev,
 		return 0;
 	}
 
-	seqno = ring->add_request(dev, ring, 0);
+	seqno = ring->add_request(ring, 0);
 	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
@@ -1745,8 +1745,7 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 4)
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
-	ring->flush(dev, ring,
-		    I915_GEM_DOMAIN_COMMAND, flush_domains);
+	ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
 }
 
 static inline void
@@ -1853,7 +1852,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 	WARN_ON(i915_verify_lists(dev));
 
-	seqno = ring->get_seqno(dev, ring);
+	seqno = ring->get_seqno(ring);
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1894,7 +1893,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		ring->user_irq_put(dev, ring);
+		ring->user_irq_put(ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 
@@ -1971,7 +1970,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	}
 	BUG_ON(seqno == dev_priv->next_seqno);
 
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1986,19 +1985,17 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		ring->waiting_gem_seqno = seqno;
-		ring->user_irq_get(dev, ring);
+		ring->user_irq_get(ring);
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
+				i915_seqno_passed(ring->get_seqno(ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));
 		else
 			wait_event(ring->irq_queue,
-				   i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
+				   i915_seqno_passed(ring->get_seqno(ring), seqno)
 				   || atomic_read(&dev_priv->mm.wedged));
 
-		ring->user_irq_put(dev, ring);
+		ring->user_irq_put(ring);
 		ring->waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -2008,7 +2005,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  __func__, ret, seqno, ring->get_seqno(ring),
 			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring. While we have the work queue
@@ -2040,7 +2037,7 @@ i915_gem_flush_ring(struct drm_device *dev,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	ring->flush(dev, ring, invalidate_domains, flush_domains);
+	ring->flush(ring, invalidate_domains, flush_domains);
 	i915_gem_process_flushing_list(dev, flush_domains, ring);
 }
 
@@ -3532,17 +3529,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		return 0;
 
 	ret = 0;
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		/* And wait for the seqno passing without holding any locks and
 		 * causing extra latency for others. This is safe as the irq
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->user_irq_get(dev, ring);
+		ring->user_irq_get(ring);
 		ret = wait_event_interruptible(ring->irq_queue,
-			      i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+			      i915_seqno_passed(ring->get_seqno(ring), seqno)
 			      || atomic_read(&dev_priv->mm.wedged));
-		ring->user_irq_put(dev, ring);
+		ring->user_irq_put(ring);
 
 		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
 			ret = -EIO;
@@ -3829,17 +3826,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			else
 				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
-			intel_ring_begin(dev, ring, 2);
-			intel_ring_emit(dev, ring,
-					MI_WAIT_FOR_EVENT | flip_mask);
-			intel_ring_emit(dev, ring, MI_NOOP);
-			intel_ring_advance(dev, ring);
+			intel_ring_begin(ring, 2);
+			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
 		}
 	}
 
 	/* Exec the batchbuffer */
-	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-					    cliprects, exec_offset);
+	ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -4520,9 +4515,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	return 0;
 
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
cleanup_render_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	intel_cleanup_ring_buffer(&dev_priv->render_ring);
cleanup_pipe_control:
 	if (HAS_PIPE_CONTROL(dev))
 		i915_gem_cleanup_pipe_control(dev);
@@ -4534,9 +4529,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+	intel_cleanup_ring_buffer(&dev_priv->render_ring);
+	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
 	if (HAS_PIPE_CONTROL(dev))
 		i915_gem_cleanup_pipe_control(dev);
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7b..852a2d848bf4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -297,7 +297,7 @@ static void notify_ring(struct drm_device *dev,
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(dev, ring);
+	u32 seqno = ring->get_seqno(ring);
 	ring->irq_gem_seqno = seqno;
 	trace_i915_gem_request_complete(dev, seqno);
 	wake_up_all(&ring->irq_queue);
@@ -586,7 +586,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("generating error event\n");
 
 	error->seqno =
-		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+		dev_priv->render_ring.get_seqno(&dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -1117,7 +1117,7 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(dev, render_ring);
+		render_ring->user_irq_get(render_ring);
 
 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1141,10 +1141,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	render_ring->user_irq_get(dev, render_ring);
+	render_ring->user_irq_get(render_ring);
 	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(dev, render_ring);
+	render_ring->user_irq_put(render_ring);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1338,7 +1338,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (list_empty(&dev_priv->render_ring.request_list) ||
-	    i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+	    i915_seqno_passed(dev_priv->render_ring.get_seqno(&dev_priv->render_ring),
 			      i915_get_tail_request(dev)->seqno)) {
 		bool missed_wakeup = false;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..d6eba661105f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 }
 
 static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
 
@@ -112,43 +112,39 @@ render_ring_flush(struct drm_device *dev,
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, cmd);
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		intel_ring_begin(ring, 2);
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 		RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object);
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
 	I915_WRITE_START(ring, obj_priv->gtt_offset);
@@ -192,8 +188,8 @@ static int init_ring_common(struct drm_device *dev,
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -204,29 +200,29 @@ static int init_ring_common(struct drm_device *dev,
 	return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret = init_ring_common(dev, ring);
-	int mode;
+	struct drm_device *dev = ring->dev;
+	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
+
 	return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr)					\
+#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
 do {									\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
+	intel_ring_emit(ring__, 0);					\
+	intel_ring_emit(ring__, 0);					\
 } while (0)
 
 /**
@@ -238,26 +234,26 @@ do {									\
  * Returned sequence numbers are nonzero on success.
  */
 static u32
-render_ring_add_request(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
+render_ring_add_request(struct intel_ring_buffer *ring,
 			u32 flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 seqno;
 
 	seqno = i915_gem_get_seqno(dev);
 
 	if (IS_GEN6(dev)) {
-		BEGIN_LP_RING(6);
-		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-		OUT_RING(PIPE_CONTROL_QW_WRITE |
+		intel_ring_begin(ring, 6);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
+		intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
+		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		intel_ring_emit(ring, seqno);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_advance(ring);
 	} else if (HAS_PIPE_CONTROL(dev)) {
 		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -266,46 +262,46 @@ render_ring_add_request(struct drm_device *dev,
 	 * PIPE_NOTIFY buffers out to memory before requesting
 	 * an interrupt.
 	 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+		intel_ring_begin(ring, 32);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
+		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		intel_ring_emit(ring, seqno);
+		intel_ring_emit(ring, 0);
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
 		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
 		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
 		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
 		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
 		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+		PIPE_CONTROL_FLUSH(ring, scratch_addr);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
+		intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		intel_ring_emit(ring, seqno);
+		intel_ring_emit(ring, 0);
+		intel_ring_advance(ring);
 	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
+		intel_ring_begin(ring, 4);
+		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		intel_ring_emit(ring, seqno);
 
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
+		intel_ring_emit(ring, MI_USER_INTERRUPT);
+		intel_ring_advance(ring);
 	}
 	return seqno;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	if (HAS_PIPE_CONTROL(dev))
 		return ((volatile u32 *)(dev_priv->seqno_page))[0];
@@ -314,9 +310,9 @@ render_ring_get_seqno(struct drm_device *dev,
 }
 
 static void
-render_ring_get_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+render_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
@@ -331,9 +327,9 @@ render_ring_get_user_irq(struct drm_device *dev,
 }
 
 static void
-render_ring_put_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+render_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
@@ -348,56 +344,41 @@ render_ring_put_user_irq(struct drm_device *dev,
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (IS_GEN6(dev)) {
-		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-	} else {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-	}
-
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = IS_GEN6(ring->dev) ?
+		RING_HWS_PGA_GEN6(ring->mmio_base) :
+		RING_HWS_PGA(ring->mmio_base);
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
 }
 
 static void
-bsd_ring_flush(struct drm_device *dev,
-	       struct intel_ring_buffer *ring,
-	       u32 invalidate_domains,
-	       u32 flush_domains)
-{
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_FLUSH);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
-}
-
-static int init_bsd_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+bsd_ring_flush(struct intel_ring_buffer *ring,
+	       u32 invalidate_domains,
+	       u32 flush_domains)
 {
-	return init_ring_common(dev, ring);
+	intel_ring_begin(ring, 2);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 }
 
 static u32
-ring_add_request(struct drm_device *dev,
-		 struct intel_ring_buffer *ring,
+ring_add_request(struct intel_ring_buffer *ring,
 		 u32 flush_domains)
 {
 	u32 seqno;
 
-	seqno = i915_gem_get_seqno(dev);
+	seqno = i915_gem_get_seqno(ring->dev);
 
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	intel_ring_begin(ring, 4);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
 
 	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
 
@@ -405,53 +386,55 @@ ring_add_request(struct drm_device *dev,
 }
 
 static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+bsd_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+bsd_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 
 static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct intel_ring_buffer *ring,
-			     struct drm_i915_gem_execbuffer2 *exec,
-			     struct drm_clip_rect *cliprects,
-			     uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 struct drm_i915_gem_execbuffer2 *exec,
			 struct drm_clip_rect *cliprects,
			 uint64_t exec_offset)
 {
 	uint32_t exec_start;
+
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-			(2 << 6) | MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+
+	intel_ring_begin(ring, 2);
+	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(2 << 6) |
			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, exec_start);
+	intel_ring_advance(ring);
+
 	return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				    struct intel_ring_buffer *ring,
-				    struct drm_i915_gem_execbuffer2 *exec,
-				    struct drm_clip_rect *cliprects,
-				    uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
 	int i = 0, count;
 	uint32_t exec_start, exec_len;
+
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
@@ -468,46 +451,44 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 		}
 
 		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(dev, ring, 4);
-			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-			intel_ring_emit(dev, ring,
-					exec_start | MI_BATCH_NON_SECURE);
-			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-			intel_ring_emit(dev, ring, 0);
+			intel_ring_begin(ring, 4);
+			intel_ring_emit(ring, MI_BATCH_BUFFER);
+			intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE);
+			intel_ring_emit(ring, exec_start + exec_len - 4);
+			intel_ring_emit(ring, 0);
 		} else {
-			intel_ring_begin(dev, ring, 2);
+			intel_ring_begin(ring, 2);
 			if (INTEL_INFO(dev)->gen >= 4) {
-				intel_ring_emit(dev, ring,
+				intel_ring_emit(ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
-				intel_ring_emit(dev, ring, exec_start);
+				intel_ring_emit(ring, exec_start);
 			} else {
-				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+				intel_ring_emit(ring, MI_BATCH_BUFFER_START
						| (2 << 6));
-				intel_ring_emit(dev, ring, exec_start |
+				intel_ring_emit(ring, exec_start |
						MI_BATCH_NON_SECURE);
 			}
 		}
-		intel_ring_advance(dev, ring);
+		intel_ring_advance(ring);
 	}
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, MI_FLUSH |
+		intel_ring_begin(ring, 2);
+		intel_ring_emit(ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP );
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
 	/* XXX breadcrumb */
 
 	return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-				struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 
@@ -524,9 +505,9 @@ static void cleanup_status_page(struct drm_device *dev,
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
@@ -555,7 +536,7 @@ static int init_status_page(struct drm_device *dev,
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(dev, ring);
+	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);
 
@@ -583,7 +564,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(dev, ring);
+		ret = init_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -616,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	}
 
 	ring->virtual_start = ring->map.handle;
-	ret = ring->init(dev, ring);
+	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
 
@@ -639,33 +620,32 @@ err_unref:
 	drm_gem_object_unreference(obj);
 	ring->gem_object = NULL;
err_hws:
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
 	if (ring->gem_object == NULL)
 		return;
 
-	drm_core_ioremapfree(&ring->map, dev);
+	drm_core_ioremapfree(&ring->map, ring->dev);
 
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
-	cleanup_status_page(dev, ring);
+
+	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
 	int rem;
 	rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(dev, ring, rem);
+		int ret = intel_wait_ring_buffer(ring, rem);
 		if (ret)
 			return ret;
 	}
@@ -683,11 +663,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-	unsigned long end;
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long end;
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -697,7 +677,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 		if (ring->space < 0)
 			ring->space += ring->size;
 		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
+			trace_i915_ring_wait_end(dev);
 			return 0;
 		}
 
@@ -713,24 +693,24 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 	return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring,
+void intel_ring_begin(struct intel_ring_buffer *ring,
		      int num_dwords)
 {
 	int n = 4*num_dwords;
+
 	if (unlikely(ring->tail + n > ring->size))
-		intel_wrap_ring_buffer(dev, ring);
+		intel_wrap_ring_buffer(ring);
+
 	if (unlikely(ring->space < n))
-		intel_wait_ring_buffer(dev, ring, n);
+		intel_wait_ring_buffer(ring, n);
 
 	ring->space -= n;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
 	ring->tail &= ring->size - 1;
-	ring->write_tail(dev, ring, ring->tail);
+	ring->write_tail(ring, ring->tail);
 }
 
 static const struct intel_ring_buffer render_ring = {
@@ -745,7 +725,7 @@ static const struct intel_ring_buffer render_ring = {
 	.get_seqno = render_ring_get_seqno,
 	.user_irq_get = render_ring_get_user_irq,
 	.user_irq_put = render_ring_put_user_irq,
-	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
 };
 
 /* ring buffer for bit-stream decoder */
@@ -755,22 +735,21 @@ static const struct intel_ring_buffer bsd_ring = {
 	.id = RING_BSD,
 	.mmio_base = BSD_RING_BASE,
 	.size = 32 * PAGE_SIZE,
-	.init = init_bsd_ring,
+	.init = init_ring_common,
 	.write_tail = ring_write_tail,
 	.flush = bsd_ring_flush,
 	.add_request = ring_add_request,
 	.get_seqno = ring_status_page_get_seqno,
 	.user_irq_get = bsd_ring_get_user_irq,
 	.user_irq_put = bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+	.dispatch_execbuffer = ring_dispatch_execbuffer,
 };
 
 
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
 	/* Every tail move must follow the sequence below */
 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -789,36 +768,33 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void gen6_ring_flush(struct intel_ring_buffer *ring,
			    u32 invalidate_domains,
			    u32 flush_domains)
 {
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_advance(dev, ring);
+	intel_ring_begin(ring, 4);
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 }
 
 static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring,
-				  struct drm_i915_gem_execbuffer2 *exec,
-				  struct drm_clip_rect *cliprects,
-				  uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      struct drm_i915_gem_execbuffer2 *exec,
			      struct drm_clip_rect *cliprects,
			      uint64_t exec_offset)
 {
 	uint32_t exec_start;
 
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring,
-			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_begin(ring, 2);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	intel_ring_emit(ring, exec_start);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -829,27 +805,25 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.id = RING_BSD,
 	.mmio_base = GEN6_BSD_RING_BASE,
 	.size = 32 * PAGE_SIZE,
-	.init = init_bsd_ring,
+	.init = init_ring_common,
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
 	.add_request = ring_add_request,
 	.get_seqno = ring_status_page_get_seqno,
 	.user_irq_get = bsd_ring_get_user_irq,
 	.user_irq_put = bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
 static void
-blt_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+blt_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 static void
-blt_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+blt_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
@@ -866,7 +840,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.get_seqno = ring_status_page_get_seqno,
 	.user_irq_get = blt_ring_get_user_irq,
 	.user_irq_put = blt_ring_put_user_irq,
-	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..ba4a393e6d16 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct intel_hw_status_page {
-	void		*page_addr;
+	u32 __iomem	*page_addr;
 	unsigned int	gfx_addr;
 	struct drm_gem_object *obj;
 };
@@ -38,31 +38,23 @@ struct intel_ring_buffer {
 	u32		irq_gem_seqno;		/* last seq seem at irq time */
 	u32		waiting_gem_seqno;
 	int		user_irq_refcount;
-	void		(*user_irq_get)(struct drm_device *dev,
-					struct intel_ring_buffer *ring);
-	void		(*user_irq_put)(struct drm_device *dev,
-					struct intel_ring_buffer *ring);
+	void		(*user_irq_get)(struct intel_ring_buffer *ring);
+	void		(*user_irq_put)(struct intel_ring_buffer *ring);
 
-	int		(*init)(struct drm_device *dev,
-				struct intel_ring_buffer *ring);
+	int		(*init)(struct intel_ring_buffer *ring);
 
-	void		(*write_tail)(struct drm_device *dev,
-				      struct intel_ring_buffer *ring,
+	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
-	void		(*flush)(struct drm_device *dev,
-				 struct intel_ring_buffer *ring,
-				 u32 invalidate_domains,
-				 u32 flush_domains);
-	u32		(*add_request)(struct drm_device *dev,
-				       struct intel_ring_buffer *ring,
-				       u32 flush_domains);
-	u32		(*get_seqno)(struct drm_device *dev,
-				     struct intel_ring_buffer *ring);
-	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
-						   struct intel_ring_buffer *ring,
-						   struct drm_i915_gem_execbuffer2 *exec,
-						   struct drm_clip_rect *cliprects,
-						   uint64_t exec_offset);
+	void		(*flush)(struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
+	u32		(*add_request)(struct intel_ring_buffer *ring,
				       u32 flush_domains);
+	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       struct drm_i915_gem_execbuffer2 *exec,
					       struct drm_clip_rect *cliprects,
					       uint64_t exec_offset);
 
 	/**
	 * List of objects currently involved in rendering from the
@@ -102,43 +94,31 @@ struct intel_ring_buffer {
 
 static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
 {
-	u32 *regs = ring->status_page.page_addr;
-	return regs[reg];
+	return ioread32(ring->status_page.page_addr + reg);
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
-				   struct intel_ring_buffer *ring,
-				   unsigned int data)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+void intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
 {
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
+	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-			 struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */