author		Zou Nan hai <nanhai.zou@intel.com>	2010-05-20 21:08:56 -0400
committer	Eric Anholt <eric@anholt.net>	2010-05-26 16:42:11 -0400
commit		852835f343146a82a528c3b712b373661d4fa17a (patch)
tree		07626a99af8f4b400f4e8616aea885c9f73a118a
parent		8187a2b70e34c727a06617441f74f202b6fefaf9 (diff)
drm/i915: convert some gem structures to per-ring V2
The active list and request list move into the ringbuffer structure, so each
ring can track its active objects in the order they are emitted on that ring.
The flushing list does not, as it doesn't matter which ring caused data to end
up in the render cache. Objects gain a pointer to the ring they are active on
(if any).

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
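For orientation, the per-ring state that the diff below manipulates can be sketched
roughly as follows. This is a simplified illustration assembled only from the fields
and hooks visible in this diff (active_list, request_list, irq_queue,
waiting_gem_seqno, irq_gem_seqno, add_request, get_gem_seqno, flush); the real
struct intel_ring_buffer is defined in intel_ringbuffer.h and carries more state, so
treat this as a sketch rather than the actual definition.

	/* Sketch only: member names taken from the diff, not the full definition. */
	struct intel_ring_buffer {
		/* objects active on this ring, in emission order */
		struct list_head active_list;
		/* outstanding breadcrumbs (drm_i915_gem_request) for this ring */
		struct list_head request_list;
		/* waiters for this ring's user interrupt */
		wait_queue_head_t irq_queue;
		u32 waiting_gem_seqno;
		u32 irq_gem_seqno;

		/* per-ring hooks used instead of the old global LP_RING paths */
		u32 (*add_request)(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   struct drm_file *file_priv,
				   u32 flush_domains);
		u32 (*get_gem_seqno)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		void (*flush)(struct drm_device *dev,
			      struct intel_ring_buffer *ring,
			      u32 invalidate_domains, u32 flush_domains);
	};

With that shape, i915_add_request(), i915_do_wait_request() and
i915_gem_retire_requests() take the ring as an explicit parameter and touch only that
ring's lists, while the flushing list stays in dev_priv->mm because a render-cache
flush is not tied to a particular ring.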
 drivers/gpu/drm/i915/i915_debugfs.c  |  12
 drivers/gpu/drm/i915/i915_dma.c      |   8
 drivers/gpu/drm/i915/i915_drv.c      |   2
 drivers/gpu/drm/i915/i915_drv.h      |  39
 drivers/gpu/drm/i915/i915_gem.c      | 210
 drivers/gpu/drm/i915/i915_irq.c      |  45
 drivers/gpu/drm/i915/intel_overlay.c |  44
 7 files changed, 207 insertions(+), 153 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fddf094deb2..c864858d5064 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, " %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -145,7 +146,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 	if (dev_priv->hw_status_page != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -197,7 +198,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->hw_status_page != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2541428b2fe5..f485880300ce 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -82,7 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		dev_priv->status_gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
@@ -835,9 +836,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-			dev_priv->status_gfx_addr);
+			 dev_priv->status_gfx_addr);
 	DRM_DEBUG_DRIVER("load hws at %p\n",
-			dev_priv->hw_status_page);
+			 dev_priv->hw_status_page);
 	return 0;
 }
 
@@ -1510,7 +1511,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c57c54f403da..d40f62d36453 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6bb7933d49dc..3f35989ba74c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -505,18 +505,7 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head shrink_list;
 
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives. last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
 		spinlock_t active_list_lock;
-		struct list_head active_list;
 
 		/**
 		 * List of objects which are not in the ringbuffer but which
@@ -554,12 +543,6 @@ typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
-
-		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -683,6 +666,9 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;
 
+	/* Which ring is refering to is this object */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
@@ -756,6 +742,9 @@ struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+	/** On Which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
 
@@ -916,11 +905,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -931,9 +922,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-			  uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+		struct drm_file *file_priv,
+		uint32_t flush_domains,
+		struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+		uint32_t seqno, int interruptible,
+		struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58b6e814fae1..af664ba923c5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1482,11 +1482,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+		struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	BUG_ON(ring == NULL);
+	obj_priv->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj_priv->active) {
@@ -1495,8 +1498,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 	}
 	/* Move from whatever list we were on to the tail of execution. */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	list_move_tail(&obj_priv->list,
-		       &dev_priv->mm.active_list);
+	list_move_tail(&obj_priv->list, &ring->active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
@@ -1549,6 +1551,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
 	obj_priv->last_rendering_seqno = 0;
+	obj_priv->ring = NULL;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -1558,7 +1561,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno)
+			       uint32_t flush_domains, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv, *next;
@@ -1569,12 +1573,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain) {
+		    obj->write_domain &&
+		    obj_priv->ring->ring_flag == ring->ring_flag) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno);
+			i915_gem_object_move_to_active(obj, seqno, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,7 +1598,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains)
+		 uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *i915_file_priv = NULL;
@@ -1608,15 +1613,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (request == NULL)
 		return 0;
 
-	seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
-						  file_priv, flush_domains);
-
-	DRM_DEBUG_DRIVER("%d\n", seqno);
+	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 
 	request->seqno = seqno;
+	request->ring = ring;
 	request->emitted_jiffies = jiffies;
-	was_empty = list_empty(&dev_priv->mm.request_list);
-	list_add_tail(&request->list, &dev_priv->mm.request_list);
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
+
 	if (i915_file_priv) {
 		list_add_tail(&request->client_list,
 			      &i915_file_priv->mm.request_list);
@@ -1628,7 +1632,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	 * domain we're flushing with our flush.
 	 */
 	if (flush_domains != 0)
-		i915_gem_process_flushing_list(dev, flush_domains, seqno);
+		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1645,18 +1649,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-	BEGIN_LP_RING(2);
-	OUT_RING(cmd);
-	OUT_RING(0); /* noop */
-	ADVANCE_LP_RING();
+
+	ring->flush(dev, ring,
+			I915_GEM_DOMAIN_COMMAND, flush_domains);
 	return flush_domains;
 }
 
@@ -1676,11 +1678,11 @@ i915_gem_retire_request(struct drm_device *dev,
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&dev_priv->mm.active_list)) {
+	while (!list_empty(&request->ring->active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+		obj_priv = list_first_entry(&request->ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 		obj = &obj_priv->base;
@@ -1727,37 +1729,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+		   struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	return ring->get_gem_seqno(dev, ring);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	struct intel_ring_buffer *ring = &(dev_priv->render_ring);
 	if (!ring->status_page.page_addr
-			|| list_empty(&dev_priv->mm.request_list))
+			|| list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev);
+	seqno = i915_get_gem_seqno(dev, ring);
 
-	while (!list_empty(&dev_priv->mm.request_list)) {
+	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		uint32_t retiring_seqno;
 
-		request = list_first_entry(&dev_priv->mm.request_list,
+		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 		retiring_seqno = request->seqno;
@@ -1792,27 +1790,28 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
 	if (!dev_priv->mm.suspended &&
-	    !list_empty(&dev_priv->mm.request_list))
+		(!list_empty(&dev_priv->render_ring.request_list)))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+		int interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
 	int ret = 0;
 
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
 
-	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1826,19 +1825,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		dev_priv->mm.waiting_gem_seqno = seqno;
+		ring->waiting_gem_seqno = seqno;
 		ring->user_irq_get(dev, ring);
 		if (interruptible)
-			ret = wait_event_interruptible(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 		else
-			wait_event(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			wait_event(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 
 		ring->user_irq_put(dev, ring);
-		dev_priv->mm.waiting_gem_seqno = 0;
+		ring->waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1847,7 +1848,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 
 	/* Directly dispatch request retiring. While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1855,7 +1856,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, ring);
 
 	return ret;
 }
@@ -1865,12 +1866,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+		struct intel_ring_buffer *ring)
 {
-	return i915_do_wait_request(dev, seqno, 1);
+	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
-
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
@@ -1884,6 +1885,19 @@ i915_gem_flush(struct drm_device *dev,
 		       flush_domains);
 }
 
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains,
+	       struct intel_ring_buffer *ring)
+{
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	ring->flush(dev, ring,
+			invalidate_domains,
+			flush_domains);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1908,7 +1922,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		ret = i915_wait_request(dev,
+				obj_priv->last_rendering_seqno, obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -2025,10 +2040,11 @@ i915_gpu_idle(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
 	uint32_t seqno;
+	int ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		      list_empty(&dev_priv->render_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2036,11 +2052,13 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+			&dev_priv->render_ring);
 	if (seqno == 0)
 		return -ENOMEM;
+	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
 
-	return i915_wait_request(dev, seqno);
+	return ret;
 }
 
 static int
@@ -2053,7 +2071,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2073,7 +2091,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
@@ -2087,8 +2105,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	struct drm_gem_object *obj;
 	int ret;
 
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 	for (;;) {
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, render_ring);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -2112,14 +2131,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 		 * things, wait for the next to finish and hopefully leave us
 		 * a buffer to evict.
 		 */
-		if (!list_empty(&dev_priv->mm.request_list)) {
+		if (!list_empty(&render_ring->request_list)) {
 			struct drm_i915_gem_request *request;
 
-			request = list_first_entry(&dev_priv->mm.request_list,
+			request = list_first_entry(&render_ring->request_list,
 						   struct drm_i915_gem_request,
 						   list);
 
-			ret = i915_wait_request(dev, request->seqno);
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
 			if (ret)
 				return ret;
 
@@ -2146,10 +2166,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 		if (obj != NULL) {
 			uint32_t seqno;
 
-			i915_gem_flush(dev,
-				       obj->write_domain,
-				       obj->write_domain);
-			seqno = i915_add_request(dev, NULL, obj->write_domain);
+			i915_gem_flush_ring(dev,
+				       obj->write_domain,
+				       obj->write_domain,
+				       obj_priv->ring);
+			seqno = i915_add_request(dev, NULL,
+					obj->write_domain,
+					obj_priv->ring);
 			if (seqno == 0)
 				return -ENOMEM;
 			continue;
@@ -2685,6 +2708,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
@@ -2692,7 +2716,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2832,7 +2856,10 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		ret = i915_do_wait_request(dev,
+				obj_priv->last_rendering_seqno,
+				0,
+				obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -3451,7 +3478,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno);
+		ret = i915_wait_request(dev, request->seqno, request->ring);
 		if (ret != 0)
 			break;
 	}
@@ -3608,6 +3635,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
 
+	struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3665,6 +3694,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	ring = &dev_priv->render_ring;
+
 	/* Look up object handles */
 	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3798,9 +3829,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
 			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+					dev->flush_domains,
+					&dev_priv->render_ring);
+
+		}
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3837,11 +3871,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	/* Exec the batchbuffer */
-	ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
-							    &dev_priv->render_ring,
-							    args,
-							    cliprects,
-							    exec_offset);
+	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+			cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -3851,7 +3882,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev);
+	flush_domains = i915_retire_commands(dev, ring);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -3862,12 +3893,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains);
+	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
 	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		obj_priv = to_intel_bo(obj);
 
-		i915_gem_object_move_to_active(obj, seqno);
+		i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -3979,7 +4011,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec2.DR4 = args->DR4;
 	exec2.num_cliprects = args->num_cliprects;
 	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = 0;
+	exec2.flags = I915_EXEC_RENDER;
 
 	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
 	if (!ret) {
@@ -4218,6 +4250,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4232,7 +4265,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4555,12 +4588,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_irq_install(dev);
@@ -4599,18 +4632,16 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock_init(&dev_priv->mm.active_list_lock);
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
-	dev_priv->mm.next_gem_seqno = 1;
-
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
@@ -4842,7 +4873,7 @@ i915_gpu_is_active(struct drm_device *dev)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		      list_empty(&dev_priv->render_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
@@ -4887,8 +4918,7 @@ rescan:
 			continue;
 
 		spin_unlock(&shrink_list_lock);
-
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e07c643c8365..8a667f1db75a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -331,6 +331,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir;
 	struct drm_i915_master_private *master_priv;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	/* disable master interrupt before clearing iir */
 	de_ier = I915_READ(DEIER);
@@ -354,10 +355,10 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	}
 
 	if (gt_iir & GT_PIPE_NOTIFY) {
-		u32 seqno = i915_get_gem_seqno(dev);
-		dev_priv->mm.irq_gem_seqno = seqno;
+		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+		render_ring->irq_gem_seqno = seqno;
 		trace_i915_gem_request_complete(dev, seqno);
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 		dev_priv->hangcheck_count = 0;
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 	}
@@ -588,7 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	error->seqno = i915_get_gem_seqno(dev);
+	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -616,7 +617,9 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv,
+			&dev_priv->render_ring.active_list, list) {
+
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
@@ -653,7 +656,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		list_for_each_entry(obj_priv,
+				&dev_priv->render_ring.active_list, list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
@@ -831,7 +835,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -850,6 +854,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -930,10 +935,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno = i915_get_gem_seqno(dev);
-			dev_priv->mm.irq_gem_seqno = seqno;
+			u32 seqno =
+				render_ring->get_gem_seqno(dev, render_ring);
+			render_ring->irq_gem_seqno = seqno;
 			trace_i915_gem_request_complete(dev, seqno);
-			DRM_WAKEUP(&dev_priv->irq_queue);
+			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 			dev_priv->hangcheck_count = 0;
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
@@ -1038,7 +1044,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 	render_ring->user_irq_get(dev, render_ring);
-	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
 	render_ring->user_irq_put(dev, render_ring);
 
@@ -1205,9 +1211,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	return -EINVAL;
 }
 
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+	return list_entry(dev_priv->render_ring.request_list.prev,
+			struct drm_i915_gem_request, list);
 }
 
 /**
@@ -1232,8 +1241,10 @@ void i915_hangcheck_elapsed(unsigned long data)
 		acthd = I915_READ(ACTHD_I965);
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->mm.request_list) ||
-		i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+	if (list_empty(&dev_priv->render_ring.request_list) ||
+		i915_seqno_passed(i915_get_gem_seqno(dev,
+				&dev_priv->render_ring),
+			i915_get_tail_request(dev)->seqno)) {
 		dev_priv->hangcheck_count = 0;
 		return;
 	}
@@ -1300,7 +1311,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	(void) I915_READ(DEIER);
 
 	/* user interrupt should be enabled, but masked initial */
-	dev_priv->gt_irq_mask_reg = 0xffffffff;
+	dev_priv->gt_irq_mask_reg = ~render_mask;
 	dev_priv->gt_irq_enable_reg = render_mask;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1363,7 +1374,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 93da83782e5e..d7ad5139d17c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -212,6 +212,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	BUG_ON(overlay->active);
 
@@ -225,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev,
+			overlay->last_flip_req, 1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -262,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }
 
 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -273,7 +277,8 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
 	u32 tmp;
 
 	if (overlay->last_flip_req != 0) {
-		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		ret = i915_do_wait_request(dev, overlay->last_flip_req,
+				1, &dev_priv->render_ring);
 		if (ret == 0) {
 			overlay->last_flip_req = 0;
 
@@ -292,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -310,6 +317,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(!overlay->active);
@@ -330,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -348,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -385,6 +397,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_gem_object *obj;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr;
 	int ret;
 
@@ -392,12 +405,14 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return -EIO;
 
 	if (overlay->last_flip_req == 0) {
-		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		overlay->last_flip_req =
+			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 		if (overlay->last_flip_req == 0)
 			return -ENOMEM;
 	}
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			interruptible, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -421,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req = i915_add_request(dev, NULL,
+			0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-		interruptible);
+			interruptible, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
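Calling-convention note: after this change a request is always emitted and waited on
against an explicit ring. The hypothetical helper below mirrors the i915_gpu_idle()
and intel_overlay call sites in the diff; wait_for_render_idle() is not a function in
the driver, and only the render ring is wired up at this point.

	/* Hypothetical caller sketch, mirroring the call sites in this diff. */
	static int wait_for_render_idle(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		uint32_t seqno;

		/* emit a request on the render ring ... */
		seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
					 &dev_priv->render_ring);
		if (seqno == 0)
			return -ENOMEM;

		/* ... and wait for it, interruptibly, on the same ring */
		return i915_do_wait_request(dev, seqno, 1, &dev_priv->render_ring);
	}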