author		Zou Nan hai <nanhai.zou@intel.com>	2010-05-20 21:08:55 -0400
committer	Eric Anholt <eric@anholt.net>		2010-05-26 16:24:49 -0400
commit		8187a2b70e34c727a06617441f74f202b6fefaf9
tree		48622c6f95282dc0a0fa668110aac4efa6e89066 /drivers/gpu/drm/i915/i915_gem.c
parent		d3301d86b4bf2bcf649982ae464211d8bcf9575a
drm/i915: introduce intel_ring_buffer structure (V2)
Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: Eric Anholt <eric@anholt.net>
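For orientation before reading the diff: the new struct intel_ring_buffer
itself is defined in intel_ringbuffer.h, which this diff does not show.
Below is a minimal sketch of just the parts this file relies on, inferred
from the call sites in the hunks that follow; the exact parameter types
and the full field list are assumptions, and the real structure carries
more state (head/tail offsets, ring size, setup callbacks, and so on).

/* Hypothetical sketch, inferred from the call sites in this diff;
 * the actual definition in intel_ringbuffer.h is larger. */
struct intel_ring_status_page {
	void *page_addr;	/* CPU address of the hardware status page */
};

struct intel_ring_buffer {
	struct drm_gem_object *gem_object;	/* backing object for the ring */
	struct intel_ring_status_page status_page;

	/* Per-ring callbacks that replace the old render-only helpers. */
	uint32_t (*add_request)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_file *file_priv,
				uint32_t flush_domains);
	void (*flush)(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      uint32_t invalidate_domains,
		      uint32_t flush_domains);
	void (*user_irq_get)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
				       struct intel_ring_buffer *ring,
				       struct drm_i915_gem_execbuffer2 *args,
				       struct drm_clip_rect *cliprects,
				       uint64_t exec_offset);	/* types assumed */
};

The point of the indirection is that i915_gem.c stops calling the
render-specific helpers (i915_ring_add_request, i915_user_irq_get/put,
i915_dispatch_gem_execbuffer) directly and goes through
dev_priv->render_ring instead, so additional rings can later be wired up
without touching these call sites.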
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  76
 1 file changed, 65 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95dbe5628a25..58b6e814fae1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		}
 	}
 }
+
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (request == NULL)
 		return 0;
 
-	seqno = i915_ring_add_request(dev);
+	seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
+			file_priv, flush_domains);
 
 	DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
-	RING_LOCALS;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+	struct intel_ring_buffer *ring = &(dev_priv->render_ring);
+	if (!ring->status_page.page_addr
+			|| list_empty(&dev_priv->mm.request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev);
@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		i915_user_irq_put(dev);
+
+		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 }
@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	u32 ier;
 	int ret = 0;
 
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
-		i915_user_irq_get(dev);
+		ring->user_irq_get(dev, ring);
 		if (interruptible)
 			ret = wait_event_interruptible(dev_priv->irq_queue,
 				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
 				atomic_read(&dev_priv->mm.wedged));
 
-		i915_user_irq_put(dev);
+		ring->user_irq_put(dev, ring);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 }
 
 
+static void
+i915_gem_flush(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+			invalidate_domains,
+			flush_domains);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+	ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
+			&dev_priv->render_ring,
+			args,
+			cliprects,
+			exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) {
+	if (dev_priv->mm.suspended ||
+			dev_priv->render_ring.gem_object == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev)
  * 965+ support PIPE_CONTROL commands, which provide finer grained control
  * over cache flushing.
  */
-int
+static int
 i915_gem_init_pipe_control(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4459,7 +4481,8 @@ err:
 	return ret;
 }
 
-void
+
+static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4477,6 +4500,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
 }
 
 int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	dev_priv->render_ring = render_ring;
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->render_ring.status_page.page_addr
+			= dev_priv->status_page_dmah->vaddr;
+		memset(dev_priv->render_ring.status_page.page_addr,
+				0, PAGE_SIZE);
+	}
+	if (HAS_PIPE_CONTROL(dev)) {
+		ret = i915_gem_init_pipe_control(dev);
+		if (ret)
+			return ret;
+	}
+	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	return ret;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_PIPE_CONTROL(dev))
+		i915_gem_cleanup_pipe_control(dev);
+}
+
+int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {