author     Zou Nan hai <nanhai.zou@intel.com>    2010-05-20 21:08:55 -0400
committer  Eric Anholt <eric@anholt.net>         2010-05-26 16:24:49 -0400
commit     8187a2b70e34c727a06617441f74f202b6fefaf9 (patch)
tree       48622c6f95282dc0a0fa668110aac4efa6e89066
parent     d3301d86b4bf2bcf649982ae464211d8bcf9575a (diff)
drm/i915: introduce intel_ring_buffer structure (V2)
Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: Eric Anholt <eric@anholt.net>
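The struct itself lives in the new drivers/gpu/drm/i915/intel_ringbuffer.h
(added by this patch; its full contents are not reproduced in this view). A
minimal sketch of its shape, inferred from the fields and callbacks the hunks
below actually touch -- member order, exact types, and the status-page struct
name are assumptions, not the literal header:

	struct intel_ring_buffer {
		const char	*name;
		struct {
			u32 ctl;
			u32 head;
			u32 tail;
			u32 start;
		} regs;				/* PRB0_* for the render ring */
		unsigned long	size;		/* replaces the old .Size */
		unsigned int	alignment;
		void		*virtual_start;
		struct drm_device *dev;
		struct drm_gem_object *gem_object;	/* replaces .ring_obj */

		unsigned int	head;
		unsigned int	tail;
		int		space;
		drm_local_map_t	map;
		struct intel_hw_status_page status_page; /* page_addr, gfx_addr, obj */
		u32		next_seqno;
		int		user_irq_refcount;	/* moved out of dev_priv */

		/* per-ring callbacks, as invoked by the converted consumers */
		int	(*init)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		unsigned int (*get_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		unsigned int (*get_tail)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		unsigned int (*get_active_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		void	(*advance_ring)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		void	(*setup_status_page)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		void	(*flush)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				u32 invalidate_domains, u32 flush_domains);
		u32	(*add_request)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_file *file_priv, u32 flush_domains);
		u32	(*get_gem_seqno)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		void	(*user_irq_get)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		void	(*user_irq_put)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
		int	(*dispatch_gem_execbuffer)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset);
	};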
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c     |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |  58
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |  29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  80
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         |  76
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |  15
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c    |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 582
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 124
-rw-r--r--  include/drm/i915_drm.h                  |   4
11 files changed, 606 insertions(+), 377 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 488175c70c7d..4fddf094deb2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->render_ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
 	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->render_ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x :  %08x\n", off, *ptr);
 	}
@@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "RingHead :  %08x\n", head);
 	seq_printf(m, "RingTail :  %08x\n", tail);
-	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.Size);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6de7eace4319..2541428b2fe5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,7 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->render_ring.status_page.page_addr
+		= dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
 	if (IS_I965G(dev))
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
-		ring->space += ring->Size;
+		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
@@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	if (dev_priv->render_ring.virtual_start) {
-		drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
-		dev_priv->render_ring.virtual_start = NULL;
-		dev_priv->render_ring.map.handle = NULL;
-		dev_priv->render_ring.map.size = 0;
-	}
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
@@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.ring_obj != NULL) {
+		if (dev_priv->render_ring.gem_object != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.Size = init->ring_size;
+		dev_priv->render_ring.size = init->ring_size;
 
 		dev_priv->render_ring.map.offset = init->ring_start;
 		dev_priv->render_ring.map.size = init->ring_size;
@@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (dev_priv->render_ring.map.handle == NULL) {
+	ring = &dev_priv->render_ring;
+
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hw_status_page) {
+	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-			 dev_priv->hw_status_page);
-
-	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+			 ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		ring->setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
@@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
-	RING_LOCALS;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.Size - 8)
+	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
@@ -365,9 +362,7 @@ i915_emit_box(struct drm_device *dev,
 		 struct drm_clip_rect *boxes,
 		 int i, int DR1, int DR4)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
-	RING_LOCALS;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				   drm_i915_batchbuffer_t * batch,
 				   struct drm_clip_rect *cliprects)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
-	RING_LOCALS;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return i915_wait_ring(dev, dev_priv->render_ring.Size - 8, __func__);
+	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+				      dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
@@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
837 " G33 hw status page\n"); 830 " G33 hw status page\n");
838 return -ENOMEM; 831 return -ENOMEM;
839 } 832 }
840 dev_priv->hw_status_page = dev_priv->hws_map.handle; 833 ring->status_page.page_addr = dev_priv->hws_map.handle;
834 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
835 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
841 836
842 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
843 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
844 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 837 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
845 dev_priv->status_gfx_addr); 838 dev_priv->status_gfx_addr);
846 DRM_DEBUG_DRIVER("load hws at %p\n", 839 DRM_DEBUG_DRIVER("load hws at %p\n",
@@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a1814f65fdb4..c57c54f403da 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
-		drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
-		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-
-		/* Stop the ring if it's running. */
-		I915_WRITE(PRB0_CTL, 0);
-		I915_WRITE(PRB0_TAIL, 0);
-		I915_WRITE(PRB0_HEAD, 0);
-
-		/* Initialize the ring. */
-		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-		I915_WRITE(PRB0_CTL,
-			   ((obj->size - 4096) & RING_NR_PAGES) |
-			   RING_NO_REPORT |
-			   RING_VALID);
-		if (!drm_core_check_feature(dev, DRIVER_MODESET))
-			i915_kernel_lost_context(dev);
-		else {
-			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-			ring->space = ring->head - (ring->tail + 8);
-			if (ring->space < 0)
-				ring->space += ring->Size;
-		}
-
+		ring->init(dev, ring);
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a39440cf1dee..6bb7933d49dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,8 +31,8 @@
 #define _I915_DRV_H_
 
 #include "i915_reg.h"
-#include "i915_drm.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object {
 	struct drm_gem_object *cur_obj;
 };
 
-typedef struct _drm_i915_ring_buffer {
-	unsigned long Size;
-	u8 *virtual_start;
-	int head;
-	int tail;
-	int space;
-	drm_local_map_t map;
-	struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
 struct mem_block {
 	struct mem_block *next;
 	struct mem_block *prev;
@@ -244,7 +234,7 @@ typedef struct drm_i915_private {
 	void __iomem *regs;
 
 	struct pci_dev *bridge_dev;
-	drm_i915_ring_buffer_t render_ring;
+	struct intel_ring_buffer render_ring;
 
 	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
@@ -270,8 +260,6 @@ typedef struct drm_i915_private {
 	atomic_t irq_received;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
-	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-	int user_irq_refcount;
 	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
@@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
 extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
-void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
-int i915_gem_init_pipe_control(struct drm_device *dev);
-void i915_gem_cleanup_pipe_control(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return;
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
-/* intel_ringbuffer.c */
-extern void i915_gem_flush(struct drm_device *dev,
-			   uint32_t invalidate_domains,
-			   uint32_t flush_domains);
-extern int i915_dispatch_gem_execbuffer(struct drm_device *dev,
-					struct drm_i915_gem_execbuffer2 *exec,
-					struct drm_clip_rect *cliprects,
-					uint64_t exec_offset);
-extern uint32_t i915_ring_add_request(struct drm_device *dev);
-
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {		\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.ring_obj == NULL) \
+	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+			== NULL)					\
 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 } while (0)
 
@@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS	volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do {						\
-	int bytes__ = 4*(n);						\
-	if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
-	/* a wrap must occur between instructions so pad beforehand */	\
-	if (unlikely (dev_priv->render_ring.tail + bytes__ > dev_priv->render_ring.Size)) \
-		i915_wrap_ring(dev);					\
-	if (unlikely (dev_priv->render_ring.space < bytes__))		\
-		i915_wait_ring(dev, bytes__, __func__);			\
-	ring_virt__ = (unsigned int *)					\
-		(dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail); \
-	dev_priv->render_ring.tail += bytes__;				\
-	dev_priv->render_ring.tail &= dev_priv->render_ring.Size - 1;	\
-	dev_priv->render_ring.space -= bytes__;				\
+#define BEGIN_LP_RING(n)  do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
+	intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));		\
 } while (0)
 
-#define OUT_RING(n) do {						\
-	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
-	*ring_virt__++ = (n);						\
+
+#define OUT_RING(x) do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
+	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
 	if (I915_VERBOSE)						\
-		DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->render_ring.tail); \
-	I915_WRITE(PRB0_TAIL, dev_priv->render_ring.tail);		\
+		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
+				dev_priv->render_ring.tail);		\
+	intel_ring_advance(dev, &dev_priv->render_ring);		\
 } while(0)
 
 /**
@@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
+			(dev_priv->render_ring.status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
 
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
 
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
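After this conversion the legacy LP-ring macros are thin wrappers over the new
intel_ring_* entry points, and the per-call RING_LOCALS cursor is gone because
the tail bookkeeping now lives inside intel_ring_begin()/intel_ring_emit()/
intel_ring_advance(). A hypothetical caller (the function name below is
illustrative, not from the patch) now behaves like this:

	static void example_emit_flush(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;

		/* macro form, unchanged for existing callers: */
		BEGIN_LP_RING(2);
		OUT_RING(MI_FLUSH);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();

		/* ...which after this patch expands to roughly: */
		intel_ring_begin(dev, &dev_priv->render_ring, 4*2);
		intel_ring_emit(dev, &dev_priv->render_ring, MI_FLUSH);
		intel_ring_emit(dev, &dev_priv->render_ring, MI_NOOP);
		intel_ring_advance(dev, &dev_priv->render_ring);
	}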
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95dbe5628a25..58b6e814fae1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		}
 	}
 }
+
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
@@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (request == NULL)
 		return 0;
 
-	seqno = i915_ring_add_request(dev);
+	seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring,
+						  file_priv, flush_domains);
 
 	DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 static uint32_t
 i915_retire_commands(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
-	RING_LOCALS;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
@@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+	struct intel_ring_buffer *ring = &(dev_priv->render_ring);
+	if (!ring->status_page.page_addr
+			|| list_empty(&dev_priv->mm.request_list))
 		return;
 
 	seqno = i915_get_gem_seqno(dev);
@@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		i915_user_irq_put(dev);
+
+		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 }
@@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	u32 ier;
 	int ret = 0;
 
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 	BUG_ON(seqno == 0);
 
 	if (atomic_read(&dev_priv->mm.wedged))
@@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
-		i915_user_irq_get(dev);
+		ring->user_irq_get(dev, ring);
 		if (interruptible)
 			ret = wait_event_interruptible(dev_priv->irq_queue,
 				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
@@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
 				atomic_read(&dev_priv->mm.wedged));
 
-		i915_user_irq_put(dev);
+		ring->user_irq_put(dev, ring);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 }
 
 
+static void
+i915_gem_flush(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+			invalidate_domains,
+			flush_domains);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+	ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev,
+			&dev_priv->render_ring,
+			args,
+			cliprects,
+			exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) {
+	if (dev_priv->mm.suspended ||
+			dev_priv->render_ring.gem_object == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev)
  * 965+ support PIPE_CONTROL commands, which provide finer grained control
  * over cache flushing.
  */
-int
+static int
 i915_gem_init_pipe_control(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4459,7 +4481,8 @@ err:
 	return ret;
 }
 
-void
+
+static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4477,6 +4500,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
 }
 
 int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	dev_priv->render_ring = render_ring;
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->render_ring.status_page.page_addr
+			= dev_priv->status_page_dmah->vaddr;
+		memset(dev_priv->render_ring.status_page.page_addr,
+				0, PAGE_SIZE);
+	}
+	if (HAS_PIPE_CONTROL(dev)) {
+		ret = i915_gem_init_pipe_control(dev);
+		if (ret)
+			return ret;
+	}
+	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	return ret;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_PIPE_CONTROL(dev))
+		i915_gem_cleanup_pipe_control(dev);
+}
+
+int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
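i915_gem_init_ringbuffer() now just copies a static template
(dev_priv->render_ring = render_ring;) and lets intel_init_ring_buffer() do
the generic work. The render_ring template is defined in intel_ringbuffer.c
past the portion shown in this view, so the following is only a sketch of
what that initializer plausibly wires up, based on the render_ring_* functions
visible in this patch -- member names and the default size are assumptions:

	static struct intel_ring_buffer render_ring = {
		.name			= "render ring",
		.regs			= { .ctl = PRB0_CTL,
					    .head = PRB0_HEAD,
					    .tail = PRB0_TAIL,
					    .start = PRB0_START },
		.size			= 32 * PAGE_SIZE,  /* assumed */
		.init			= init_render_ring,
		.get_head		= render_ring_get_head,
		.get_tail		= render_ring_get_tail,
		.get_active_head	= render_ring_get_active_head,
		.advance_ring		= render_ring_advance_ring,
		.flush			= render_ring_flush,
		.add_request		= render_ring_add_request,
		.get_gem_seqno		= render_ring_get_gem_seqno,
		.user_irq_get		= render_ring_get_user_irq,
		.user_irq_put		= render_ring_put_user_irq,
		.setup_status_page	= render_setup_status_page,
		.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	};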
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dd91c97de968..e07c643c8365 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -545,7 +545,8 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
 	}
 
 	if (bbaddr == 0) {
-		ring = (u32 *)(dev_priv->render_ring.virtual_start + dev_priv->render_ring.Size);
+		ring = (u32 *)(dev_priv->render_ring.virtual_start
+				+ dev_priv->render_ring.size);
 		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
 			bbaddr = i915_get_bbaddr(dev, ring);
 			if (bbaddr)
@@ -639,7 +640,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.ring_obj);
+	error->ringbuffer = i915_error_object_create(dev,
+			dev_priv->render_ring.gem_object);
 
 	/* Record buffers on the active list. */
 	error->active_bo = NULL;
@@ -984,7 +986,6 @@ static int i915_emit_irq(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	i915_kernel_lost_context(dev);
 
@@ -1009,9 +1010,10 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	if (dev_priv->trace_irq_seqno == 0)
-		i915_user_irq_get(dev);
+		render_ring->user_irq_get(dev, render_ring);
 
 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1021,6 +1023,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1034,10 +1037,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	i915_user_irq_get(dev);
+	render_ring->user_irq_get(dev, render_ring);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	i915_user_irq_put(dev);
+	render_ring->user_irq_put(dev, render_ring);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacfd..b867f3c78408 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
 	int ret, pipesrc;
-	RING_LOCALS;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b06eb6e..93da83782e5e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
-	RING_LOCALS;
 
 	BUG_ON(overlay->active);
 
@@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
-	RING_LOCALS;
 
 	BUG_ON(!overlay->active);
 
@@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 	u32 tmp;
-	RING_LOCALS;
 
 	if (overlay->last_flip_req != 0) {
 		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
@@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
-	RING_LOCALS;
 
 	BUG_ON(!overlay->active);
 
@@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 					 int interruptible)
 {
 	struct drm_device *dev = overlay->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	u32 flip_addr;
 	int ret;
-	RING_LOCALS;
 
 	if (overlay->hw_wedged == HW_WEDGED)
 		return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 06058ddb4eed..5715c4d8cce9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -29,30 +29,24 @@
 
 #include "drmP.h"
 #include "drm.h"
-#include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_drm.h"
 #include "i915_trace.h"
-#include "intel_drv.h"
 
-void
-i915_gem_flush(struct drm_device *dev,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains)
+static void
+render_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32 invalidate_domains,
+		u32 flush_domains)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t cmd;
-	RING_LOCALS;
-
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
-	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+	u32 cmd;
+	trace_i915_gem_request_flush(dev, ring->next_seqno,
 				     invalidate_domains, flush_domains);
 
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
 		/*
 		 * read/write caches:
@@ -100,19 +94,130 @@ i915_gem_flush(struct drm_device *dev,
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		BEGIN_LP_RING(2);
-		OUT_RING(cmd);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		intel_ring_begin(dev, ring, 8);
+		intel_ring_emit(dev, ring, cmd);
+		intel_ring_emit(dev, ring, MI_NOOP);
+		intel_ring_advance(dev, ring);
 	}
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
 
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
 }
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+	return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 head;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	obj_priv = to_intel_bo(ring->gem_object);
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(ring->regs.ctl, 0);
+	I915_WRITE(ring->regs.head, 0);
+	I915_WRITE(ring->regs.tail, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+	head = ring->get_head(dev, ring);
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("%s head not reset to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+
+		I915_WRITE(ring->regs.head, 0);
+
+		DRM_ERROR("%s head forced to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+	}
+
+	I915_WRITE(ring->regs.ctl,
+			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_NO_REPORT | RING_VALID);
+
+	head = I915_READ(ring->regs.head) & HEAD_ADDR;
+	/* If the head is still not zero, the ring is dead */
+	if (head != 0) {
+		DRM_ERROR("%s initialization failed "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+		return -EIO;
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_kernel_lost_context(dev);
+	else {
+		ring->head = ring->get_head(dev, ring);
+		ring->tail = ring->get_tail(dev, ring);
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+	}
+	return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = init_ring_common(dev, ring);
+	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+		I915_WRITE(MI_MODE,
+				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+	}
+	return ret;
+}
+
 #define PIPE_CONTROL_FLUSH(addr)					\
+do {									\
 	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
 		PIPE_CONTROL_DEPTH_STALL);				\
 	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
 	OUT_RING(0);							\
 	OUT_RING(0);							\
+} while (0)
 
 /**
  * Creates a new sequence number, emitting a write of it to the status page
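The init_ring_common()/init_render_ring() pair above is what actually runs
when i965_reset() (in the i915_drv.c hunk earlier) invokes the ring's init
hook; the twenty-odd lines of hard-coded PRB0_* pokes that used to live in
i965_reset() collapse into one indirect call through the per-ring register
block. Schematically:

	/* in i965_reset(), after this patch: */
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	ring->init(dev, ring);	/* render ring: init_render_ring(), which
				 * calls init_ring_common() to stop the ring,
				 * program regs.start, validate head/tail and
				 * recompute ring->space */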
@@ -122,21 +227,15 @@ i915_gem_flush(struct drm_device *dev,
  *
  * Returned sequence numbers are nonzero on success.
  */
-uint32_t
-i915_ring_add_request(struct drm_device *dev)
+static u32
+render_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
 {
+	u32 seqno;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
-	RING_LOCALS;
-
-	/* Grab the seqno we're going to make this request be, and bump the
-	 * next (skipping 0 so it can be the reserved no-seqno value).
-	 */
-	seqno = dev_priv->mm.next_gem_seqno;
-	dev_priv->mm.next_gem_seqno++;
-	if (dev_priv->mm.next_gem_seqno == 0)
-		dev_priv->mm.next_gem_seqno++;
-
+	seqno = intel_ring_get_seqno(dev, ring);
 	if (HAS_PIPE_CONTROL(dev)) {
 		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -181,13 +280,26 @@ i915_ring_add_request(struct drm_device *dev)
 	return seqno;
 }
 
-void i915_user_irq_get(struct drm_device *dev)
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	if (HAS_PIPE_CONTROL(dev))
+		return ((volatile u32 *)(dev_priv->seqno_page))[0];
+	else
+		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
@@ -196,14 +308,16 @@ void i915_user_irq_get(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void i915_user_irq_put(struct drm_device *dev)
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
@@ -212,20 +326,31 @@ void i915_user_irq_put(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			      struct drm_i915_gem_execbuffer2 *exec,
-			      struct drm_clip_rect *cliprects,
-			      uint64_t exec_offset)
+static void render_setup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (IS_GEN6(dev)) {
+		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA_GEN6); /* posting read */
+	} else {
+		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA); /* posting read */
+	}
+
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
 	int i = 0, count;
 	uint32_t exec_start, exec_len;
-	RING_LOCALS;
-
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
@@ -242,74 +367,61 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 		}
 
 		if (IS_I830(dev) || IS_845G(dev)) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			OUT_RING(exec_start + exec_len - 4);
-			OUT_RING(0);
-			ADVANCE_LP_RING();
+			intel_ring_begin(dev, ring, 4);
+			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+			intel_ring_emit(dev, ring,
+					exec_start | MI_BATCH_NON_SECURE);
+			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+			intel_ring_emit(dev, ring, 0);
 		} else {
-			BEGIN_LP_RING(2);
+			intel_ring_begin(dev, ring, 4);
 			if (IS_I965G(dev)) {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6) |
-					 MI_BATCH_NON_SECURE_I965);
-				OUT_RING(exec_start);
+				intel_ring_emit(dev, ring,
+						MI_BATCH_BUFFER_START | (2 << 6)
+						| MI_BATCH_NON_SECURE_I965);
+				intel_ring_emit(dev, ring, exec_start);
 			} else {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6));
-				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+						| (2 << 6));
+				intel_ring_emit(dev, ring, exec_start |
+						MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		}
+		intel_ring_advance(dev, ring);
 	}
 
 	/* XXX breadcrumb */
 	return 0;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
+static void cleanup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 
-	if (dev_priv->hws_obj == NULL)
+	obj = ring->status_page.obj;
+	if (obj == NULL)
 		return;
-
-	obj = dev_priv->hws_obj;
 	obj_priv = to_intel_bo(obj);
 
 	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
-	dev_priv->hws_obj = NULL;
+	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-	dev_priv->hw_status_page = NULL;
-
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
-
-	/* Write high address into HWS_PGA when disabling. */
-	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
+static int init_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	/* If we need a physical address for the status page, it's already
-	 * initialized at driver load time.
-	 */
-	if (!I915_NEED_GFX_HWS(dev))
-		return 0;
-
 	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate status page\n");
@@ -321,36 +433,21 @@ i915_gem_init_hws(struct drm_device *dev)
 
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
-		drm_gem_object_unreference(obj);
 		goto err_unref;
 	}
 
-	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-	if (dev_priv->hw_status_page == NULL) {
-		DRM_ERROR("Failed to map status page.\n");
+	ring->status_page.gfx_addr = obj_priv->gtt_offset;
+	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		ret = -EINVAL;
 		goto err_unpin;
 	}
+	ring->status_page.obj = obj;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	if (HAS_PIPE_CONTROL(dev)) {
-		ret = i915_gem_init_pipe_control(dev);
-		if (ret)
-			goto err_unpin;
-	}
-
-	dev_priv->hws_obj = obj;
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA_GEN6); /* posting read */
-	} else {
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA); /* posting read */
-	}
-	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	ring->setup_status_page(dev, ring);
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			ring->name, ring->status_page.gfx_addr);
 
 	return 0;
 
@@ -359,43 +456,42 @@ err_unpin:
359err_unref: 456err_unref:
360 drm_gem_object_unreference(obj); 457 drm_gem_object_unreference(obj);
361err: 458err:
362 return 0; 459 return ret;
363} 460}
364 461
365int 462
366i915_gem_init_ringbuffer(struct drm_device *dev) 463int intel_init_ring_buffer(struct drm_device *dev,
464 struct intel_ring_buffer *ring)
367{ 465{
368 drm_i915_private_t *dev_priv = dev->dev_private;
369 struct drm_gem_object *obj;
370 struct drm_i915_gem_object *obj_priv;
371 drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
372 int ret; 466 int ret;
373 u32 head; 467 struct drm_i915_gem_object *obj_priv;
468 struct drm_gem_object *obj;
469 ring->dev = dev;
374 470
375 ret = i915_gem_init_hws(dev); 471 if (I915_NEED_GFX_HWS(dev)) {
376 if (ret != 0) 472 ret = init_status_page(dev, ring);
377 return ret; 473 if (ret)
474 return ret;
475 }
378 476
379 obj = i915_gem_alloc_object(dev, 128 * 1024); 477 obj = i915_gem_alloc_object(dev, ring->size);
380 if (obj == NULL) { 478 if (obj == NULL) {
381 DRM_ERROR("Failed to allocate ringbuffer\n"); 479 DRM_ERROR("Failed to allocate ringbuffer\n");
382 i915_gem_cleanup_hws(dev); 480 ret = -ENOMEM;
383 return -ENOMEM; 481 goto cleanup;
384 } 482 }
385 obj_priv = to_intel_bo(obj);
386 483
387 ret = i915_gem_object_pin(obj, 4096); 484 ring->gem_object = obj;
485
486 ret = i915_gem_object_pin(obj, ring->alignment);
388 if (ret != 0) { 487 if (ret != 0) {
389 drm_gem_object_unreference(obj); 488 drm_gem_object_unreference(obj);
390 i915_gem_cleanup_hws(dev); 489 goto cleanup;
391 return ret;
392 } 490 }
393 491
394 /* Set up the kernel mapping for the ring. */ 492 obj_priv = to_intel_bo(obj);
395 ring->Size = obj->size; 493 ring->map.size = ring->size;
396
397 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 494 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
398 ring->map.size = obj->size;
399 ring->map.type = 0; 495 ring->map.type = 0;
400 ring->map.flags = 0; 496 ring->map.flags = 0;
401 ring->map.mtrr = 0; 497 ring->map.mtrr = 0;
@@ -403,143 +499,85 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
403 drm_core_ioremap_wc(&ring->map, dev); 499 drm_core_ioremap_wc(&ring->map, dev);
404 if (ring->map.handle == NULL) { 500 if (ring->map.handle == NULL) {
405 DRM_ERROR("Failed to map ringbuffer.\n"); 501 DRM_ERROR("Failed to map ringbuffer.\n");
406 memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
407 i915_gem_object_unpin(obj); 502 i915_gem_object_unpin(obj);
408 drm_gem_object_unreference(obj); 503 drm_gem_object_unreference(obj);
409 i915_gem_cleanup_hws(dev); 504 ret = -EINVAL;
410 return -EINVAL; 505 goto cleanup;
411 }
412 ring->ring_obj = obj;
413 ring->virtual_start = ring->map.handle;
414
415 /* Stop the ring if it's running. */
416 I915_WRITE(PRB0_CTL, 0);
417 I915_WRITE(PRB0_TAIL, 0);
418 I915_WRITE(PRB0_HEAD, 0);
419
420 /* Initialize the ring. */
421 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
422 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
423
424 /* G45 ring initialization fails to reset head to zero */
425 if (head != 0) {
426 DRM_ERROR("Ring head not reset to zero "
427 "ctl %08x head %08x tail %08x start %08x\n",
428 I915_READ(PRB0_CTL),
429 I915_READ(PRB0_HEAD),
430 I915_READ(PRB0_TAIL),
431 I915_READ(PRB0_START));
432 I915_WRITE(PRB0_HEAD, 0);
433
434 DRM_ERROR("Ring head forced to zero "
435 "ctl %08x head %08x tail %08x start %08x\n",
436 I915_READ(PRB0_CTL),
437 I915_READ(PRB0_HEAD),
438 I915_READ(PRB0_TAIL),
439 I915_READ(PRB0_START));
440 } 506 }
441 507
442 I915_WRITE(PRB0_CTL, 508 ring->virtual_start = ring->map.handle;
443 ((obj->size - 4096) & RING_NR_PAGES) | 509 ret = ring->init(dev, ring);
444 RING_NO_REPORT | 510 if (ret != 0) {
445 RING_VALID); 511 intel_cleanup_ring_buffer(dev, ring);
446 512 return ret;
447 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
448
449 /* If the head is still not zero, the ring is dead */
450 if (head != 0) {
451 DRM_ERROR("Ring initialization failed "
452 "ctl %08x head %08x tail %08x start %08x\n",
453 I915_READ(PRB0_CTL),
454 I915_READ(PRB0_HEAD),
455 I915_READ(PRB0_TAIL),
456 I915_READ(PRB0_START));
457 return -EIO;
458 } 513 }
459 514
460 /* Update our cache of the ring state */
461 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 515 if (!drm_core_check_feature(dev, DRIVER_MODESET))
462 i915_kernel_lost_context(dev); 516 i915_kernel_lost_context(dev);
463 else { 517 else {
464 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 518 ring->head = ring->get_head(dev, ring);
465 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 519 ring->tail = ring->get_tail(dev, ring);
466 ring->space = ring->head - (ring->tail + 8); 520 ring->space = ring->head - (ring->tail + 8);
467 if (ring->space < 0) 521 if (ring->space < 0)
468 ring->space += ring->Size; 522 ring->space += ring->size;
469 } 523 }
470 524 INIT_LIST_HEAD(&ring->active_list);
471 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 525 INIT_LIST_HEAD(&ring->request_list);
472 I915_WRITE(MI_MODE, 526 return ret;
473 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 527cleanup:
474 } 528 cleanup_status_page(dev, ring);
475 529 return ret;
476 return 0;
477} 530}
478 531
479void 532void intel_cleanup_ring_buffer(struct drm_device *dev,
480i915_gem_cleanup_ringbuffer(struct drm_device *dev) 533 struct intel_ring_buffer *ring)
481{ 534{
482 drm_i915_private_t *dev_priv = dev->dev_private; 535 if (ring->gem_object == NULL)
483
484 if (dev_priv->render_ring.ring_obj == NULL)
485 return; 536 return;
486 537
487 drm_core_ioremapfree(&dev_priv->render_ring.map, dev); 538 drm_core_ioremapfree(&ring->map, dev);
488
489 i915_gem_object_unpin(dev_priv->render_ring.ring_obj);
490 drm_gem_object_unreference(dev_priv->render_ring.ring_obj);
491 dev_priv->render_ring.ring_obj = NULL;
492 memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
493 539
494 i915_gem_cleanup_hws(dev); 540 i915_gem_object_unpin(ring->gem_object);
541 drm_gem_object_unreference(ring->gem_object);
542 ring->gem_object = NULL;
543 cleanup_status_page(dev, ring);
495} 544}
496 545
497/* As a ringbuffer is only allowed to wrap between instructions, fill 546int intel_wrap_ring_buffer(struct drm_device *dev,
498 * the tail with NOOPs. 547 struct intel_ring_buffer *ring)
499 */
500int i915_wrap_ring(struct drm_device *dev)
501{ 548{
502 drm_i915_private_t *dev_priv = dev->dev_private; 549 unsigned int *virt;
503 volatile unsigned int *virt;
504 int rem; 550 int rem;
551 rem = ring->size - ring->tail;
505 552
506 rem = dev_priv->render_ring.Size - dev_priv->render_ring.tail; 553 if (ring->space < rem) {
507 if (dev_priv->render_ring.space < rem) { 554 int ret = intel_wait_ring_buffer(dev, ring, rem);
508 int ret = i915_wait_ring(dev, rem, __func__);
509 if (ret) 555 if (ret)
510 return ret; 556 return ret;
511 } 557 }
512 dev_priv->render_ring.space -= rem;
513 558
514 virt = (unsigned int *) 559 virt = (unsigned int *)(ring->virtual_start + ring->tail);
515 (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail);
516 rem /= 4; 560 rem /= 4;
517 while (rem--) 561 while (rem--)
518 *virt++ = MI_NOOP; 562 *virt++ = MI_NOOP;
519 563
520 dev_priv->render_ring.tail = 0; 564 ring->tail = 0;
521 565
522 return 0; 566 return 0;
523} 567}
524 568
525int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 569int intel_wait_ring_buffer(struct drm_device *dev,
570 struct intel_ring_buffer *ring, int n)
526{ 571{
527 drm_i915_private_t *dev_priv = dev->dev_private; 572 unsigned long end;
528 drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
529 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
530 u32 last_acthd = I915_READ(acthd_reg);
531 u32 acthd;
532 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
533 int i;
534 573
535 trace_i915_ring_wait_begin (dev); 574 trace_i915_ring_wait_begin (dev);
536 575 end = jiffies + 3 * HZ;
537 for (i = 0; i < 100000; i++) { 576 do {
538 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 577 ring->head = ring->get_head(dev, ring);
539 acthd = I915_READ(acthd_reg);
540 ring->space = ring->head - (ring->tail + 8); 578 ring->space = ring->head - (ring->tail + 8);
541 if (ring->space < 0) 579 if (ring->space < 0)
542 ring->space += ring->Size; 580 ring->space += ring->size;
543 if (ring->space >= n) { 581 if (ring->space >= n) {
544 trace_i915_ring_wait_end (dev); 582 trace_i915_ring_wait_end (dev);
545 return 0; 583 return 0;
@@ -550,19 +588,97 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
550 if (master_priv->sarea_priv) 588 if (master_priv->sarea_priv)
551 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 589 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
552 } 590 }
591 yield();
592 } while (!time_after(jiffies, end));
593 trace_i915_ring_wait_end (dev);
594 return -EBUSY;
595}
553 596
597void intel_ring_begin(struct drm_device *dev,
598 struct intel_ring_buffer *ring, int n)
599{
600 if (unlikely(ring->tail + n > ring->size))
601 intel_wrap_ring_buffer(dev, ring);
602 if (unlikely(ring->space < n))
603 intel_wait_ring_buffer(dev, ring, n);
604}
554 605
555 if (ring->head != last_head) 606void intel_ring_emit(struct drm_device *dev,
556 i = 0; 607 struct intel_ring_buffer *ring, unsigned int data)
557 if (acthd != last_acthd) 608{
558 i = 0; 609 unsigned int *virt = ring->virtual_start + ring->tail;
610 *virt = data;
611 ring->tail += 4;
612 ring->tail &= ring->size - 1;
613 ring->space -= 4;
614}
559 615
560 last_head = ring->head; 616void intel_ring_advance(struct drm_device *dev,
561 last_acthd = acthd; 617 struct intel_ring_buffer *ring)
562 msleep_interruptible(10); 618{
619 ring->advance_ring(dev, ring);
620}
563 621
564 } 622void intel_fill_struct(struct drm_device *dev,
623 struct intel_ring_buffer *ring,
624 void *data,
625 unsigned int len)
626{
627	BUG_ON((len & (4-1)) != 0);
628	intel_ring_begin(dev, ring, len);
629	/* compute the copy target only after begin(), which may wrap tail */
630	memcpy(ring->virtual_start + ring->tail, data, len);
631 ring->tail += len;
632 ring->tail &= ring->size - 1;
633 ring->space -= len;
634 intel_ring_advance(dev, ring);
635}
565 636
566 trace_i915_ring_wait_end (dev); 637u32 intel_ring_get_seqno(struct drm_device *dev,
567 return -EBUSY; 638 struct intel_ring_buffer *ring)
639{
640 u32 seqno;
641 seqno = ring->next_seqno;
642
643 /* reserve 0 for non-seqno */
644 if (++ring->next_seqno == 0)
645 ring->next_seqno = 1;
646 return seqno;
568} 647}
648
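A minimal caller sketch for the intel_ring_begin() / intel_ring_emit() /
intel_ring_advance() trio above; lengths are in bytes, matching the
tail/space arithmetic in this patch. MI_FLUSH is assumed to be available
from the driver's register headers, and the opcode choice is illustrative
only, not taken from this patch:

static void example_emit_flush(struct drm_device *dev,
                               struct intel_ring_buffer *ring)
{
        /* reserve two dwords; begin() waits or wraps as needed */
        intel_ring_begin(dev, ring, 2 * 4);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        /* publish the new tail via the ring's advance_ring hook */
        intel_ring_advance(dev, ring);
}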
649struct intel_ring_buffer render_ring = {
650 .name = "render ring",
651 .regs = {
652 .ctl = PRB0_CTL,
653 .head = PRB0_HEAD,
654 .tail = PRB0_TAIL,
655 .start = PRB0_START
656 },
657 .ring_flag = I915_EXEC_RENDER,
658 .size = 32 * PAGE_SIZE,
659 .alignment = PAGE_SIZE,
660 .virtual_start = NULL,
661 .dev = NULL,
662 .gem_object = NULL,
663 .head = 0,
664 .tail = 0,
665 .space = 0,
666 .next_seqno = 1,
667 .user_irq_refcount = 0,
668 .irq_gem_seqno = 0,
669 .waiting_gem_seqno = 0,
670 .setup_status_page = render_setup_status_page,
671 .init = init_render_ring,
672 .get_head = render_ring_get_head,
673 .get_tail = render_ring_get_tail,
674 .get_active_head = render_ring_get_active_head,
675 .advance_ring = render_ring_advance_ring,
676 .flush = render_ring_flush,
677 .add_request = render_ring_add_request,
678 .get_gem_seqno = render_ring_get_gem_seqno,
679 .user_irq_get = render_ring_get_user_irq,
680 .user_irq_put = render_ring_put_user_irq,
681 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
682 .status_page = {NULL, 0, NULL},
683 .map = {0,}
684};
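The render_ring definition above doubles as a template for additional
engines: the bsd_ring declared in the new header would be filled the same
way, pointing .regs at the BSD engine's registers and swapping in
per-engine callbacks. A rough sketch, with placeholder names that are not
part of this patch:

struct intel_ring_buffer bsd_ring_sketch = {
        .name       = "bsd ring",
        .ring_flag  = I915_EXEC_BSD,
        .size       = 32 * PAGE_SIZE,
        .alignment  = PAGE_SIZE,
        .next_seqno = 1,
        /* .regs, .setup_status_page, .init, .get_head, .flush,
         * .dispatch_gem_execbuffer, ... would point at bsd_*
         * implementations */
};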
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 000000000000..d5568d3766de
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4struct intel_hw_status_page {
5 void *page_addr;
6 unsigned int gfx_addr;
7 struct drm_gem_object *obj;
8};
9
10struct drm_i915_gem_execbuffer2;
11struct intel_ring_buffer {
12 const char *name;
13 struct ring_regs {
14 u32 ctl;
15 u32 head;
16 u32 tail;
17 u32 start;
18 } regs;
19 unsigned int ring_flag;
20 unsigned long size;
21 unsigned int alignment;
22 void *virtual_start;
23 struct drm_device *dev;
24 struct drm_gem_object *gem_object;
25
26 unsigned int head;
27 unsigned int tail;
28 unsigned int space;
29 u32 next_seqno;
30 struct intel_hw_status_page status_page;
31
32 u32 irq_gem_seqno; /* last seq seen at irq time */
33 u32 waiting_gem_seqno;
34 int user_irq_refcount;
35 void (*user_irq_get)(struct drm_device *dev,
36 struct intel_ring_buffer *ring);
37 void (*user_irq_put)(struct drm_device *dev,
38 struct intel_ring_buffer *ring);
39 void (*setup_status_page)(struct drm_device *dev,
40 struct intel_ring_buffer *ring);
41
42 int (*init)(struct drm_device *dev,
43 struct intel_ring_buffer *ring);
44
45 unsigned int (*get_head)(struct drm_device *dev,
46 struct intel_ring_buffer *ring);
47 unsigned int (*get_tail)(struct drm_device *dev,
48 struct intel_ring_buffer *ring);
49 unsigned int (*get_active_head)(struct drm_device *dev,
50 struct intel_ring_buffer *ring);
51 void (*advance_ring)(struct drm_device *dev,
52 struct intel_ring_buffer *ring);
53 void (*flush)(struct drm_device *dev,
54 struct intel_ring_buffer *ring,
55 u32 invalidate_domains,
56 u32 flush_domains);
57 u32 (*add_request)(struct drm_device *dev,
58 struct intel_ring_buffer *ring,
59 struct drm_file *file_priv,
60 u32 flush_domains);
61 u32 (*get_gem_seqno)(struct drm_device *dev,
62 struct intel_ring_buffer *ring);
63 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
64 struct intel_ring_buffer *ring,
65 struct drm_i915_gem_execbuffer2 *exec,
66 struct drm_clip_rect *cliprects,
67 uint64_t exec_offset);
68
69 /**
70 * List of objects currently involved in rendering from the
71 * ringbuffer.
72 *
73 * Includes buffers having the contents of their GPU caches
74 * flushed, not necessarily primitives. last_rendering_seqno
75 * represents when the rendering involved will be completed.
76 *
77 * A reference is held on the buffer while on this list.
78 */
79 struct list_head active_list;
80
81 /**
82 * List of breadcrumbs associated with GPU requests currently
83 * outstanding.
84 */
85 struct list_head request_list;
86
87 wait_queue_head_t irq_queue;
88 drm_local_map_t map;
89};
90
91static inline u32
92intel_read_status_page(struct intel_ring_buffer *ring,
93 int reg)
94{
95 u32 *regs = ring->status_page.page_addr;
96 return regs[reg];
97}
98
99int intel_init_ring_buffer(struct drm_device *dev,
100 struct intel_ring_buffer *ring);
101void intel_cleanup_ring_buffer(struct drm_device *dev,
102 struct intel_ring_buffer *ring);
103int intel_wait_ring_buffer(struct drm_device *dev,
104 struct intel_ring_buffer *ring, int n);
105int intel_wrap_ring_buffer(struct drm_device *dev,
106 struct intel_ring_buffer *ring);
107void intel_ring_begin(struct drm_device *dev,
108 struct intel_ring_buffer *ring, int n);
109void intel_ring_emit(struct drm_device *dev,
110 struct intel_ring_buffer *ring, u32 data);
111void intel_fill_struct(struct drm_device *dev,
112 struct intel_ring_buffer *ring,
113 void *data,
114 unsigned int len);
115void intel_ring_advance(struct drm_device *dev,
116 struct intel_ring_buffer *ring);
117
118u32 intel_ring_get_seqno(struct drm_device *dev,
119 struct intel_ring_buffer *ring);
120
121extern struct intel_ring_buffer render_ring;
122extern struct intel_ring_buffer bsd_ring;
123
124#endif /* _INTEL_RINGBUFFER_H_ */
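intel_read_status_page() indexes the CPU mapping of the hardware status
page by dword slot. A sketch of a typical consumer, assuming the driver's
conventional I915_GEM_HWS_INDEX breadcrumb slot (taken from surrounding
i915 code, not this patch; the helper name is hypothetical):

static int example_seqno_passed(struct intel_ring_buffer *ring, u32 seqno)
{
        /* the engine writes the last completed seqno into a fixed
         * dword of the status page; a signed comparison tolerates
         * the u32 wrap handled by intel_ring_get_seqno() */
        u32 hw_seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);

        return (s32)(hw_seqno - seqno) >= 0;
}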
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7cdf6d..e9168704cabe 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 {
616 __u32 num_cliprects; 616 __u32 num_cliprects;
617 /** This is a struct drm_clip_rect *cliprects */ 617 /** This is a struct drm_clip_rect *cliprects */
618 __u64 cliprects_ptr; 618 __u64 cliprects_ptr;
619 __u64 flags; /* currently unused */ 619#define I915_EXEC_RENDER (1<<0)
620#define I915_EXEC_BSD (1<<1)
621 __u64 flags;
620 __u64 rsvd1; 622 __u64 rsvd1;
621 __u64 rsvd2; 623 __u64 rsvd2;
622}; 624};
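With flags now carrying a ring selector, userspace chooses an engine per
submission. A userspace-side sketch (not from this patch; buffer setup and
error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_on_render_ring(int fd, __u64 buffers_ptr,
                                 __u32 buffer_count, __u32 batch_len)
{
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr  = buffers_ptr;
        execbuf.buffer_count = buffer_count;
        execbuf.batch_len    = batch_len;
        execbuf.flags        = I915_EXEC_RENDER;  /* was "currently unused" */

        return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}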