author		Zou Nan hai <nanhai.zou@intel.com>	2010-05-20 21:08:57 -0400
committer	Eric Anholt <eric@anholt.net>	2010-05-26 16:46:58 -0400
commit		d1b851fc0d105caa6b6e3e7c92d2987dfb52cbe0 (patch)
tree		19f9c7d935725c513cd56ed14191f2827afe2f38 /drivers/gpu/drm/i915
parent		852835f343146a82a528c3b712b373661d4fa17a (diff)
drm/i915: implement BSD ring buffer V2
The BSD (bit stream decoder) ring is used for accessing the BSD engine,
which decodes video bitstreams for H.264 and VC1 on G45+. It is
asynchronous with the render ring and has access to separate parts of
the GPU from it, though the render cache is coherent between the two.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
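For illustration, a minimal userspace sketch (not part of this patch) of how a batch reaches the new ring: an execbuffer2 caller sets the I915_EXEC_BSD flag, and the kernel routes the batch to dev_priv->bsd_ring instead of the render ring. The flag itself lives in the uapi header include/drm/i915_drm.h, changed alongside this patch but outside the diffstat shown here. The sketch assumes fd is an open i915 DRM device node and batch_handle is an existing GEM object holding valid BSD engine commands; relocations and error handling, which real libdrm callers need, are omitted.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch only: submit one batch buffer to the BSD ring.  With this
 * patch, requesting I915_EXEC_BSD on hardware without a BSD engine
 * (HAS_BSD() false) makes the ioctl fail with -EINVAL. */
static int submit_bsd_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;		/* the batch object itself */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BSD;		/* pick bsd_ring, not render_ring */

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

Leaving the flag clear keeps the old behavior: the batch is dispatched on the render ring.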
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          107
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c           13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h           14
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  153
6 files changed, 276 insertions, 15 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f485880300ce..1dbed700800e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -130,6 +130,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3f35989ba74c..6bc0fc080f2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -235,6 +235,7 @@ typedef struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
+	struct intel_ring_buffer bsd_ring;
 
 	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
@@ -1121,6 +1122,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 	 (dev)->pci_device == 0x2A42 || \
 	 (dev)->pci_device == 0x2E42)
 
+#define HAS_BSD(dev)		(IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af664ba923c5..c51495f15718 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1730,7 +1730,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 uint32_t
 i915_get_gem_seqno(struct drm_device *dev,
-		 struct intel_ring_buffer *ring)
+		   struct intel_ring_buffer *ring)
 {
 	return ring->get_gem_seqno(dev, ring);
 }
@@ -1792,8 +1792,13 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
 	if (!dev_priv->mm.suspended &&
-		(!list_empty(&dev_priv->render_ring.request_list)))
+		(!list_empty(&dev_priv->render_ring.request_list) ||
+			(HAS_BSD(dev) &&
+			 !list_empty(&dev_priv->bsd_ring.request_list))))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -1883,6 +1888,11 @@ i915_gem_flush(struct drm_device *dev,
 	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
 			invalidate_domains,
 			flush_domains);
+
+	if (HAS_BSD(dev))
+		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+				invalidate_domains,
+				flush_domains);
 }
 
 static void
@@ -2039,12 +2049,14 @@ i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	uint32_t seqno;
+	uint32_t seqno1, seqno2;
 	int ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list);
+	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev) ||
+			list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2052,11 +2064,23 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
 			&dev_priv->render_ring);
-	if (seqno == 0)
+	if (seqno1 == 0)
 		return -ENOMEM;
-	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
+	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev)) {
+		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+				&dev_priv->bsd_ring);
+		if (seqno2 == 0)
+			return -ENOMEM;
+
+		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+		if (ret)
+			return ret;
+	}
+
 
 	return ret;
 }
@@ -2071,7 +2095,9 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2091,7 +2117,9 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
@@ -2106,9 +2134,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	int ret;
 
 	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
 	for (;;) {
 		i915_gem_retire_requests(dev, render_ring);
 
+		if (HAS_BSD(dev))
+			i915_gem_retire_requests(dev, bsd_ring);
+
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
 		 */
@@ -2146,6 +2178,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 			continue;
 		}
 
+		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&bsd_ring->request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
+			if (ret)
+				return ret;
+
+			continue;
+		}
+
 		/* If we didn't have anything on the request list but there
 		 * are buffers awaiting a flush, emit one and try again.
 		 * When we wait on it, those buffers waiting for that flush
@@ -3641,6 +3688,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 			 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
+	if (args->flags & I915_EXEC_BSD) {
+		if (!HAS_BSD(dev)) {
+			DRM_ERROR("execbuf with wrong flag\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->bsd_ring;
+	} else {
+		ring = &dev_priv->render_ring;
+	}
+
 
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3694,8 +3751,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
-	ring = &dev_priv->render_ring;
-
 	/* Look up object handles */
 	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3834,6 +3889,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 					       dev->flush_domains,
 					       &dev_priv->render_ring);
 
+			if (HAS_BSD(dev))
+				(void)i915_add_request(dev, file_priv,
+						dev->flush_domains,
+						&dev_priv->bsd_ring);
 		}
 	}
 
@@ -4267,6 +4326,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 */
 	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
 	 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -4433,7 +4495,9 @@ i915_gem_idle(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (dev_priv->mm.suspended ||
-			dev_priv->render_ring.gem_object == NULL) {
+			(dev_priv->render_ring.gem_object == NULL) ||
+			(HAS_BSD(dev) &&
+			 dev_priv->bsd_ring.gem_object == NULL)) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4550,6 +4614,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 		return ret;
 	}
 	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	if (!ret && HAS_BSD(dev)) {
+		dev_priv->bsd_ring = bsd_ring;
+		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	}
 	return ret;
 }
 
@@ -4559,6 +4627,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 	if (HAS_PIPE_CONTROL(dev))
 		i915_gem_cleanup_pipe_control(dev);
 }
@@ -4589,11 +4659,13 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_irq_install(dev);
@@ -4638,6 +4710,10 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+	if (HAS_BSD(dev)) {
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+	}
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -4874,6 +4950,8 @@ i915_gpu_is_active(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
 		      list_empty(&dev_priv->render_ring.active_list);
+	if (HAS_BSD(dev))
+		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
@@ -4920,6 +4998,9 @@ rescan:
 		spin_unlock(&shrink_list_lock);
 		i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
+		if (HAS_BSD(dev))
+			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
 					 list) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a667f1db75a..0a3a5806a12e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
 	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
 
 #define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
 				 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -362,6 +362,9 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		dev_priv->hangcheck_count = 0;
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 	}
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
 
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
@@ -944,6 +947,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
 			intel_prepare_page_flip(dev, 0);
 
@@ -1297,7 +1303,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_PIPE_NOTIFY;
+	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
@@ -1376,6 +1382,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
 	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
 
+	if (HAS_BSD(dev))
+		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
 	if (HAS_PCH_SPLIT(dev))
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc46f0d..784cf3c914e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
 #define I915_DEBUG_INTERRUPT				(1<<2)
 #define I915_USER_INTERRUPT				(1<<1)
 #define I915_ASLE_INTERRUPT				(1<<0)
+#define I915_BSD_USER_INTERRUPT				(1<<25)
 #define EIR		0x020b0
 #define EMR		0x020b4
 #define ESR		0x020b8
@@ -368,6 +369,17 @@
 #define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+
+#define BSD_RING_TAIL		0x04030
+#define BSD_RING_HEAD		0x04034
+#define BSD_RING_START		0x04038
+#define BSD_RING_CTL		0x0403c
+#define BSD_RING_ACTHD		0x04074
+#define BSD_HWS_PGA		0x04080
 
 /*
  * Framebuffer compression (915+ only)
@@ -2355,6 +2367,8 @@
 #define GT_PIPE_NOTIFY		(1 << 4)
 #define GT_SYNC_STATUS		(1 << 2)
 #define GT_USER_INTERRUPT	(1 << 0)
+#define GT_BSD_USER_INTERRUPT	(1 << 5)
+
 
 #define GTISR   0x44010
 #define GTIMR   0x44014
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5715c4d8cce9..f6b84fe8099a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -340,6 +340,119 @@ static void render_setup_status_page(struct drm_device *dev,
 
 }
 
+void
+bsd_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32 invalidate_domains,
+		u32 flush_domains)
+{
+	intel_ring_begin(dev, ring, 8);
+	intel_ring_emit(dev, ring, MI_FLUSH);
+	intel_ring_emit(dev, ring, MI_NOOP);
+	intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	return init_ring_common(dev, ring);
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
+{
+	u32 seqno;
+	seqno = intel_ring_get_seqno(dev, ring);
+	intel_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+	return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+	I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
+{
+	uint32_t exec_start;
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	intel_ring_begin(dev, ring, 2);
+	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+			(2 << 6) | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(dev, ring, exec_start);
+	intel_ring_advance(dev, ring);
+	return 0;
+}
+
+
 static int
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 		struct intel_ring_buffer *ring,
@@ -588,6 +701,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 			if (master_priv->sarea_priv)
 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 		}
+
 		yield();
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
@@ -682,3 +796,42 @@ struct intel_ring_buffer render_ring = {
 	.status_page = {NULL, 0, NULL},
 	.map = {0,}
 };
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+	.name			= "bsd ring",
+	.regs			= {
+		.ctl = BSD_RING_CTL,
+		.head = BSD_RING_HEAD,
+		.tail = BSD_RING_TAIL,
+		.start = BSD_RING_START
+	},
+	.ring_flag		= I915_EXEC_BSD,
+	.size			= 32 * PAGE_SIZE,
+	.alignment		= PAGE_SIZE,
+	.virtual_start		= NULL,
+	.dev			= NULL,
+	.gem_object		= NULL,
+	.head			= 0,
+	.tail			= 0,
+	.space			= 0,
+	.next_seqno		= 1,
+	.user_irq_refcount	= 0,
+	.irq_gem_seqno		= 0,
+	.waiting_gem_seqno	= 0,
+	.setup_status_page	= bsd_setup_status_page,
+	.init			= init_bsd_ring,
+	.get_head		= bsd_ring_get_head,
+	.get_tail		= bsd_ring_get_tail,
+	.get_active_head	= bsd_ring_get_active_head,
+	.advance_ring		= bsd_ring_advance_ring,
+	.flush			= bsd_ring_flush,
+	.add_request		= bsd_ring_add_request,
+	.get_gem_seqno		= bsd_ring_get_gem_seqno,
+	.user_irq_get		= bsd_ring_get_user_irq,
+	.user_irq_put		= bsd_ring_put_user_irq,
+	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+	.status_page		= {NULL, 0, NULL},
+	.map			= {0,}
+};